Revert "Pull from svn bleeding_edge@3716"
This reverts commit 888f6729be6a6f6fbe246cb5a9f122e2dbe455b7.
(Waiting until the fix for v8 issue 554101 lands in v8 rather than patching it
straight into Android)
diff --git a/src/SConscript b/src/SConscript
index 7950ab3..4eb8722 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -56,10 +56,10 @@
disassembler.cc
execution.cc
factory.cc
+ fast-codegen.cc
flags.cc
frame-element.cc
frames.cc
- full-codegen.cc
func-name-inferrer.cc
global-handles.cc
handles.cc
@@ -112,8 +112,8 @@
arm/cpu-arm.cc
arm/debug-arm.cc
arm/disasm-arm.cc
+ arm/fast-codegen-arm.cc
arm/frames-arm.cc
- arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/macro-assembler-arm.cc
@@ -135,8 +135,8 @@
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/disasm-ia32.cc
+ ia32/fast-codegen-ia32.cc
ia32/frames-ia32.cc
- ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/macro-assembler-ia32.cc
@@ -152,8 +152,8 @@
x64/cpu-x64.cc
x64/debug-x64.cc
x64/disasm-x64.cc
+ x64/fast-codegen-x64.cc
x64/frames-x64.cc
- x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/macro-assembler-x64.cc
@@ -168,7 +168,6 @@
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
- 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
@@ -197,9 +196,6 @@
'os:openbsd': [
'd8-posix.cc'
],
- 'os:solaris': [
- 'd8-posix.cc'
- ],
'os:win32': [
'd8-windows.cc'
],
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 74547be..07da800 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -1371,36 +1371,6 @@
// Support for VFP.
-void Assembler::vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(offset % 4 == 0);
- emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(offset % 4 == 0);
- emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
-}
-
-
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 8b65b7c..cd53dd6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -796,14 +796,6 @@
// However, some simple modifications can allow
// these APIs to support D16 to D31.
- void vldr(const DwVfpRegister dst,
- const Register base,
- int offset, // Offset must be a multiple of 4.
- const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const Register base,
- int offset, // Offset must be a multiple of 4.
- const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 38f08d1..0c1dbcc 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -605,19 +605,14 @@
}
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
+ cgen_->UnloadReference(this);
}
@@ -666,7 +661,6 @@
frame_->Drop(size);
frame_->EmitPush(r0);
}
- ref->set_unloaded();
}
@@ -1250,6 +1244,8 @@
Reference target(this, node->proxy());
LoadAndSpill(val);
target.SetValue(NOT_CONST_INIT);
+ // The reference is removed from the stack (preserving TOS) when
+ // it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -1936,17 +1932,25 @@
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2);
- } else {
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, r3 pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop();
+ }
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, r3 pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ if (each.size() > 0) {
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // ie, now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ frame_->EmitPop(r0);
}
}
}
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Drop();
+
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2840,7 +2844,7 @@
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target(), node->is_compound());
+ { Reference target(this, node->target());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2855,7 +2859,8 @@
node->op() == Token::INIT_CONST) {
LoadAndSpill(node->value());
- } else { // Assignment is a compound assignment.
+ } else {
+ // +=, *= and similar binary assignments.
// Get the old value of the lhs.
target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
@@ -2876,12 +2881,13 @@
frame_->EmitPush(r0);
}
}
+
Variable* var = node->target()->AsVariableProxy()->AsVariable();
if (var != NULL &&
(var->mode() == Variable::CONST) &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
+
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -3091,20 +3097,16 @@
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
- LoadAndSpill(property->obj());
- LoadAndSpill(property->key());
- EmitKeyedLoad(false);
- frame_->Drop(); // key
- // Put the function below the receiver.
+ // Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ ref.GetValueAndSpill(); // receiver
+
+ // Pass receiver to called function.
if (property->is_synthetic()) {
- // Use the global receiver.
- frame_->Drop();
- frame_->EmitPush(r0);
LoadGlobalReceiver(r0);
} else {
- frame_->EmitPop(r1); // receiver
- frame_->EmitPush(r0); // function
- frame_->EmitPush(r1); // receiver
+ __ ldr(r0, frame_->ElementAt(ref.size()));
+ frame_->EmitPush(r0);
}
// Call the function.
@@ -3468,20 +3470,6 @@
}
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
- ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- cc_reg_ = ne;
-}
-
-
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3574,8 +3562,7 @@
Load(args->at(0));
Load(args->at(1));
- StringCompareStub stub;
- frame_->CallStub(&stub, 2);
+ frame_->CallRuntime(Runtime::kStringCompare, 2);
frame_->EmitPush(r0);
}
@@ -3805,9 +3792,7 @@
frame_->EmitPush(r0);
}
- // A constant reference is not saved to, so a constant reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
+ { Reference target(this, node->expression());
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -4268,16 +4253,6 @@
}
-void CodeGenerator::EmitKeyedLoad(bool is_global) {
- Comment cmnt(masm_, "[ Load from keyed Property");
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- RelocInfo::Mode rmode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- frame_->CallCodeObject(ic, rmode, 0);
-}
-
-
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
@@ -4344,21 +4319,23 @@
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
+ VirtualFrame* frame = cgen_->frame();
+ Comment cmnt(masm, "[ Load from keyed Property");
ASSERT(property != NULL);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
- cgen_->EmitKeyedLoad(var != NULL);
- cgen_->frame()->EmitPush(r0);
+ RelocInfo::Mode rmode = (var == NULL)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ frame->CallCodeObject(ic, rmode, 0);
+ frame->EmitPush(r0);
break;
}
default:
UNREACHABLE();
}
-
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
}
@@ -4420,7 +4397,6 @@
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -4856,14 +4832,14 @@
Label* lhs_not_nan,
Label* slow,
bool strict) {
- Label rhs_is_smi;
+ Label lhs_is_smi;
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
+ __ b(eq, &lhs_is_smi);
- // Lhs is a Smi. Check whether the rhs is a heap number.
+ // Rhs is a Smi. Check whether the non-smi is a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal (r0 is already not zero)
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
@@ -4872,67 +4848,57 @@
__ b(ne, slow);
}
- // Lhs (r1) is a smi, rhs (r0) is a number.
+ // Rhs is a smi, lhs is a number.
+ __ push(lr);
+
if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7 .
CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt(d7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
} else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Load rhs to a double in r0, r1.
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ pop(lr);
}
+
+ // r3 and r2 are rhs as double.
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
// We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
+ // since it's a Smi.
+ __ pop(lr);
__ jmp(lhs_not_nan);
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal.
__ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Rhs (r0) is a smi, lhs (r1) is a heap number.
+ // Lhs is a smi, rhs is a number.
+ // r0 is Smi and r1 is heap number.
+ __ push(lr);
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+
if (CpuFeatures::IsSupported(VFP3)) {
- // Convert rhs to a double in d6 .
CpuFeatures::Scope scope(VFP3);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt(d6, s13);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
} else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
}
+
+ __ pop(lr);
// Fall through to both_loaded_as_doubles.
}
@@ -5081,18 +5047,10 @@
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ jmp(both_loaded_as_doubles);
}
@@ -5117,9 +5075,8 @@
}
-// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
-// the comparison.
+// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
+// positive or negative to indicate the result of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -5144,19 +5101,21 @@
// 3) Fall through to both_loaded_as_doubles.
// 4) Jump to lhs_not_nan.
// In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP3 is supported the double values of the numbers have
- // been loaded into d7 and d6. Otherwise, the double values have been loaded
- // into r0, r1, r2, and r3.
+ // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7, if
- // VFP3 is supported, or in r0, r1, r2, and r3.
+ // r0, r1, r2, r3 are the double representations of the right hand side
+ // and the left hand side.
+
if (CpuFeatures::IsSupported(VFP3)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
+ __ vmov(d6, r0, r1);
+ __ vmov(d7, r2, r3);
+
__ vcmp(d7, d6);
__ vmrs(pc); // Move vector status bits to normal status bits.
Label nan;
@@ -5195,7 +5154,6 @@
}
Label check_for_symbols;
- Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
@@ -5203,7 +5161,7 @@
EmitCheckForTwoHeapNumbers(masm,
&both_loaded_as_doubles,
&check_for_symbols,
- &flat_string_check);
+ &slow);
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
@@ -5211,27 +5169,10 @@
if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
- EmitCheckForSymbols(masm, &flat_string_check);
+ EmitCheckForSymbols(masm, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
-
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- r1,
- r0,
- r2,
- r3,
- r4,
- r5);
- // Never falls through to here.
-
__ bind(&slow);
-
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
@@ -5298,18 +5239,10 @@
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
- // using registers d7 and d6 for the double values.
- bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
- Token::MOD != operation;
- if (use_fp_registers) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt(d7, s15);
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt(d6, s13);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
@@ -5391,16 +5324,9 @@
if (mode == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
}
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
- }
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
if (mode == OVERWRITE_RIGHT) {
@@ -5408,12 +5334,10 @@
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- if (use_fp_registers) {
+
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt(d7, s15);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -5433,16 +5357,9 @@
if (mode == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
}
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
- }
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
if (mode == OVERWRITE_LEFT) {
@@ -5450,12 +5367,9 @@
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- if (use_fp_registers) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt(d6, s13);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -5468,12 +5382,22 @@
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+
+ if (CpuFeatures::IsSupported(VFP3) &&
+ ((Token::MUL == operation) ||
+ (Token::DIV == operation) ||
+ (Token::ADD == operation) ||
+ (Token::SUB == operation))) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
+ __ vmov(d6, r0, r1);
+ __ vmov(d7, r2, r3);
if (Token::MUL == operation) {
__ vmul(d5, d6, d7);
@@ -5486,20 +5410,15 @@
} else {
UNREACHABLE();
}
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
+
+ __ vmov(r0, r1, d5);
+
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
__ mov(pc, lr);
return;
}
-
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -6804,101 +6723,6 @@
}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- __ tst(min_length, Operand(min_length));
- __ b(eq, &compare_lengths);
-
- // Setup registers so that we only need to increment one register
- // in the loop.
- __ add(scratch2, min_length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch2));
- __ add(right, right, Operand(scratch2));
- // Registers left and right points to the min_length character of strings.
- __ rsb(min_length, min_length, Operand(-1));
- Register index = min_length;
- // Index starts at -min_length.
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ add(index, index, Operand(1), SetCC);
- __ ldrb(scratch2, MemOperand(left, index), ne);
- __ ldrb(scratch4, MemOperand(right, index), ne);
- // Skip to compare lengths with eq condition true.
- __ b(eq, &compare_lengths);
- __ cmp(scratch2, scratch4);
- __ b(eq, &loop);
- // Fallthrough with eq condition false.
- }
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use zero length_delta as result.
- __ mov(r0, Operand(length_delta), SetCC, eq);
- // Fall through to here if characters compare not-equal.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // sp[0]: return address
- // sp[4]: right string
- // sp[8]: left string
-
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); // left
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- ASSERT_EQ(0, EQUAL);
- ASSERT_EQ(0, kSmiTag);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
-
- // Compare flat ascii strings natively. Remove arguments from stack first.
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index ccca2e9..f5de0eb 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,69 +43,57 @@
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
+ enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen, Expression* expression);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
+ ASSERT(type_ == ILLEGAL);
type_ = value;
}
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
// The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
+ int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
+ // the expression stack, and it is left in place with its value above it.
void GetValue();
- // Generate code to pop a reference, push the value of the reference,
- // and then spill the stack frame.
+ // Generate code to push the value of a reference on top of the expression
+ // stack and then spill the stack frame. This function is used temporarily
+ // while the code generator is being transformed.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
+ // on the expression stack. The stored value is left in place (with the
+ // reference intact below it) to support chained assignments.
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
};
@@ -286,9 +274,6 @@
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
- // Load a keyed property, leaving it in r0. The receiver and key are
- // passed on the stack, and remain there.
- void EmitKeyedLoad(bool is_global);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
@@ -356,7 +341,6 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -443,8 +427,8 @@
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
+ friend class FastCodeGenerator;
+ friend class CodeGenSelector;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -527,28 +511,6 @@
};
-class StringCompareStub: public CodeStub {
- public:
- StringCompareStub() { }
-
- // Compare two flat ASCII strings and returns result in r0.
- // Does not use the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 8a32c95..9432207 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -237,7 +237,6 @@
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
- inline int CoprocessorField() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); }
@@ -247,8 +246,6 @@
inline int MField() const { return Bit(5); }
inline int DField() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); }
- inline int PField() const { return Bit(24); }
- inline int UField() const { return Bit(23); }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -299,7 +296,6 @@
inline bool HasB() const { return BField() == 1; }
inline bool HasW() const { return WField() == 1; }
inline bool HasL() const { return LField() == 1; }
- inline bool HasU() const { return UField() == 1; }
inline bool HasSign() const { return SignField() == 1; }
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 5b31455..afed0fa 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -998,43 +998,29 @@
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- if (instr->CoprocessorField() != 0xB) {
+ if (instr->Bit(23) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ } else if (instr->Bit(20) == 1) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
Unknown(instr); // Not used by V8.
} else {
- switch (instr->OpcodeField()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->HasL()) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- } else {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- }
- break;
- case 0x8:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
- }
- break;
- case 0xC:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
- }
- break;
- default:
- Unknown(instr); // Not used by V8.
- break;
- }
+ Unknown(instr); // Not used by V8.
}
}
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
similarity index 90%
rename from src/arm/full-codegen-arm.cc
rename to src/arm/fast-codegen-arm.cc
index 1844c28..4256e47 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "full-codegen.h"
+#include "fast-codegen.h"
#include "parser.h"
namespace v8 {
@@ -52,7 +52,7 @@
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
int locals_count = fun->scope()->num_stack_slots();
@@ -167,7 +167,7 @@
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FastCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -214,7 +214,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -250,7 +250,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -270,7 +270,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -290,7 +290,7 @@
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -323,7 +323,7 @@
}
-void FullCodeGenerator::DropAndApply(int count,
+void FastCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -371,7 +371,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context,
+void FastCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -432,7 +432,7 @@
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
+void FastCodeGenerator::DoTest(Expression::Context context) {
// The value to test is pushed on the stack, and duplicated on the stack
// if necessary (for value/test and test/value contexts).
ASSERT_NE(NULL, true_label_);
@@ -495,7 +495,7 @@
}
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -514,14 +514,14 @@
}
-void FullCodeGenerator::Move(Register destination, Slot* source) {
+void FastCodeGenerator::Move(Register destination, Slot* source) {
// Use destination as scratch.
MemOperand slot_operand = EmitSlotSearch(source, destination);
__ ldr(destination, slot_operand);
}
-void FullCodeGenerator::Move(Slot* dst,
+void FastCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -537,7 +537,7 @@
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -637,7 +637,7 @@
}
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
__ mov(r1, Operand(pairs));
@@ -648,7 +648,7 @@
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -666,21 +666,17 @@
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->slot();
- Property* property = var->AsProperty();
-
- if (var->is_global() && !var->is_this()) {
+ Expression* rewrite = var->rewrite();
+ if (rewrite == NULL) {
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
@@ -690,24 +686,34 @@
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
DropAndApply(1, context, r0);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- Comment cmnt(masm_, "Lookup slot");
- __ mov(r1, Operand(var->name()));
- __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- Apply(context, r0);
-
- } else if (slot != NULL) {
- Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
- ? "Context slot"
- : "Stack slot");
+ } else if (rewrite->AsSlot() != NULL) {
+ Slot* slot = rewrite->AsSlot();
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
Apply(context, slot);
-
} else {
- Comment cmnt(masm_, "Rewritten parameter");
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
+ Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -739,7 +745,7 @@
}
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -766,7 +772,7 @@
}
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
@@ -841,7 +847,7 @@
}
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -894,7 +900,7 @@
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
@@ -903,14 +909,14 @@
}
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FastCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ pop(r1);
GenericBinaryOpStub stub(op, NO_OVERWRITE);
@@ -919,17 +925,11 @@
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
- // Three main cases: global variables, lookup slots, and all other
- // types of slots. Left-hand-side parameters that rewrite to
- // explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- Slot* slot = var->slot();
if (var->is_global()) {
- ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
// r2, and the global object on the stack.
@@ -941,13 +941,6 @@
// Overwrite the global object on the stack with the result if needed.
DropAndApply(1, context, r0);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- __ push(result_register()); // Value.
- __ mov(r1, Operand(var->name()));
- __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- Apply(context, r0);
-
} else if (var->slot() != NULL) {
Slot* slot = var->slot();
switch (slot->type()) {
@@ -974,7 +967,6 @@
break;
}
Apply(context, result_register());
-
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
@@ -983,7 +975,7 @@
}
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1019,7 +1011,7 @@
}
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1054,7 +1046,7 @@
}
-void FullCodeGenerator::VisitProperty(Property* expr) {
+void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1073,7 +1065,7 @@
}
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1095,7 +1087,7 @@
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1113,7 +1105,7 @@
}
-void FullCodeGenerator::VisitCall(Call* expr) {
+void FastCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1184,7 +1176,7 @@
}
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1219,7 +1211,7 @@
}
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1254,7 +1246,7 @@
}
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1359,26 +1351,13 @@
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForValue(expr->expression(), kAccumulator);
- Label no_conversion;
- __ tst(result_register(), Operand(kSmiTagMask));
- __ b(eq, &no_conversion);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
- __ bind(&no_conversion);
- Apply(context_, result_register());
- break;
- }
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1397,7 +1376,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kAccumulator;
+ location_ = kStack;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1414,15 +1393,11 @@
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
+ __ push(r0);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &no_conversion);
- __ push(r0);
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1454,28 +1429,12 @@
}
}
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- if (loop_depth() > 0) {
- __ add(r0, r0, Operand(expr->op() == Token::INC
- ? Smi::FromInt(1)
- : Smi::FromInt(-1)));
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &done);
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(r1));
- }
+ // Call stub for +1/-1.
__ mov(r1, Operand(expr->op() == Token::INC
? Smi::FromInt(1)
: Smi::FromInt(-1)));
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
__ CallStub(&stub);
- __ bind(&done);
// Store the value returned in r0.
switch (assign_type) {
@@ -1524,7 +1483,7 @@
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1559,7 +1518,7 @@
}
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1674,25 +1633,25 @@
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, r0);
}
-Register FullCodeGenerator::result_register() { return r0; }
+Register FastCodeGenerator::result_register() { return r0; }
-Register FullCodeGenerator::context_register() { return cp; }
+Register FastCodeGenerator::context_register() { return cp; }
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
}
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
}
@@ -1700,7 +1659,7 @@
// ----------------------------------------------------------------------------
// Non-local control flow support.
-void FullCodeGenerator::EnterFinallyBlock() {
+void FastCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(r1));
// Store result register while executing finally block.
__ push(result_register());
@@ -1713,7 +1672,7 @@
}
-void FullCodeGenerator::ExitFinallyBlock() {
+void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
// Restore result register from stack.
__ pop(r1);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index b59c3f0..a1f2613 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -569,10 +569,11 @@
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
- // Check bit field.
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(kSlowCaseBitFieldMask));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6c3bbbb..18cadac 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1221,46 +1221,6 @@
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
- and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
- b(ne, failure);
-}
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- ASSERT_EQ(0, kSmiTag);
- and_(scratch1, first, Operand(second));
- tst(scratch1, Operand(kSmiTagMask));
- b(eq, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
-}
-
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index efc5bfa..8f2064a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -337,25 +337,6 @@
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- // ---------------------------------------------------------------------------
- // String utilities
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label *failure);
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_ascii_strings);
-
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 9dd3b93..ed06eb2 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -63,6 +63,8 @@
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
@@ -74,8 +76,6 @@
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
- * - At start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
@@ -610,7 +610,6 @@
// Set frame pointer just above the arguments.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -654,15 +653,6 @@
// Store this value in a local variable, for use when clearing
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
- __ tst(r1, Operand(r1));
- __ mov(r1, Operand(1), LeaveCC, eq);
- __ mov(r1, Operand(0), LeaveCC, ne);
- __ str(r1, MemOperand(frame_pointer(), kAtStart));
-
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 7de5f93..4459859 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -123,7 +123,8 @@
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kAtStart = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kAtStart + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
@@ -135,9 +136,8 @@
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
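The two header hunks above relocate at_start from a local slot below the frame pointer to a caller-pushed parameter above it; every constant is simply the previous one plus kPointerSize. A compile-time sketch of that arithmetic, with kStoredRegisters pinned at 0 purely so the numbers are concrete:

    // Frame-pointer-relative offsets; kStoredRegisters = 0 is an assumption
    // made only to make the arithmetic concrete.
    const int kPointerSize = 4;
    const int kStoredRegisters = 0;
    const int kReturnAddress   = kStoredRegisters + 8 * kPointerSize;  // 32
    // Stack parameters placed by the caller, above the return address:
    const int kRegisterOutput  = kReturnAddress + kPointerSize;        // 36
    const int kAtStart         = kRegisterOutput + kPointerSize;       // 40
    const int kStackHighEnd    = kAtStart + kPointerSize;              // 44
    const int kDirectCall      = kStackHighEnd + kPointerSize;         // 48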
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index f543151..c4b1e00 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,9 +47,9 @@
using ::v8::internal::DeleteArray;
// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// ::v8::internal::OS in the same way as SNPrintF is that the
-// Windows C Run-Time Library does not provide vsscanf.
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
+// Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
// The Debugger class is used by the simulator while debugging simulated ARM
@@ -2033,62 +2033,42 @@
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- if (instr->CoprocessorField() != 0xB) {
- UNIMPLEMENTED(); // Not used by V8.
- } else {
- switch (instr->OpcodeField()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
- UNIMPLEMENTED(); // Not used by V8.
- } else {
- int rt = instr->RtField();
- int rn = instr->RnField();
- int vm = instr->VmField();
- if (instr->HasL()) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
- } else {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
+ if (instr->Bit(23) == 1) {
+ UNIMPLEMENTED();
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
- }
- }
- break;
- case 0x8:
- case 0xC: { // Load and store double to memory.
- int rn = instr->RnField();
- int vd = instr->VdField();
- int offset = instr->Immed8Field();
- if (!instr->HasU()) {
- offset = -offset;
- }
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load double from memory: vldr.
- set_s_register_from_sinteger(2*vd, ReadW(address, instr));
- set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
- } else {
- // Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(2*vd), instr);
- WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
- }
- break;
+ set_s_register_from_sinteger(2*vm, rs_val);
+ set_s_register_from_sinteger((2*vm+1), rn_val);
+
+ } else if (instr->Bit(20) == 1) {
+ int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+ int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
+
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
}
- default:
- UNIMPLEMENTED(); // Not used by V8.
- break;
+ } else {
+ UNIMPLEMENTED();
}
+ } else if (instr->Bit(21) == 1) {
+ UNIMPLEMENTED();
+ } else {
+ UNIMPLEMENTED();
}
}
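The reverted simulator decode above pattern-matches raw opcode bits rather than dispatching on OpcodeField. The Bit/Bits accessors are ordinary shift-and-mask extractors; a self-contained sketch of the match for the two-core-registers-to-double vmov form (the bit values are paraphrased from the checks in the hunk, not from the ARM manual):

    #include <cstdint>

    // Assumed shape of the simulator's bit-field accessors.
    inline int Bit(uint32_t instr, int nr) { return (instr >> nr) & 1; }
    inline int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    // Mirrors the condition chain in the hunk: bits 27-24 == 0xC,
    // bit 22 set, bits 11-8 == 0xB, bits 7-6 clear, bit 4 set.
    bool IsVmovBetweenCoreAndDouble(uint32_t instr) {
      return Bits(instr, 27, 24) == 0xC &&
             Bit(instr, 22) == 1 &&
             Bits(instr, 11, 8) == 0xB &&
             Bits(instr, 7, 6) == 0x0 &&
             Bit(instr, 4) == 1;
    }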
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 1973730..3ce5b7a 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -63,8 +63,8 @@
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
assembler::arm::Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+ FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
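With at_start restored as an explicit argument, both expansions of CALL_GENERATED_REGEXP_CODE take eight parameters. A hedged usage sketch of the hardware expansion; the patch only shows the arity, so the parameter order and entry signature below are assumptions for illustration:

    // Hardware variant, as in the hunk above.
    #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
      entry(p0, p1, p2, p3, p4, p5, p6, p7)

    // Assumed signature: eight int/pointer-sized arguments, int result.
    typedef int (*RegExpEntry)(void*, int, void*, void*, int*, int, void*, int);

    int RunMatch(RegExpEntry entry, void* subject, int start_offset,
                 void* input_start, void* input_end, int* captures,
                 void* stack_base) {
      int at_start = (start_offset == 0) ? 1 : 0;  // the reintroduced argument
      return CALL_GENERATED_REGEXP_CODE(entry, subject, start_offset,
                                        input_start, input_end, captures,
                                        at_start, stack_base, 0);
    }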
diff --git a/src/codegen.cc b/src/codegen.cc
index aa2a2b8..fd7e0e8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -217,10 +217,6 @@
Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval) {
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- int len = String::cast(script->source())->length();
- Counters::total_old_codegen_source_size.Increment(len);
- }
MakeCodePrologue(fun);
// Generate code.
const int kInitialBufferSize = 4 * KB;
@@ -348,7 +344,6 @@
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
- {&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
{&CodeGenerator::GenerateSubString, "_SubString"},
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 5427367..d8e186a 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -37,7 +37,9 @@
static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
-#if defined(ANDROID)
+// TODO(andreip): remove this #ifdef if the page cycler confirms that all is
+// well and we can cache up to 5 script generations.
+#if 0 // defined(ANDROID)
static const int kScriptGenerations = 1;
static const int kEvalGlobalGenerations = 1;
static const int kEvalContextualGenerations = 1;
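For context on the knob being toggled here: the compilation cache ages entries by generation, so scripts that stay hot survive collection rounds while cold ones fall off the end. A minimal sketch of that scheme under assumed semantics (promote on hit, shift on age), not V8's actual implementation:

    #include <map>
    #include <string>
    #include <vector>

    class GenerationalCache {
     public:
      explicit GenerationalCache(int generations) : tables_(generations) {}

      bool Lookup(const std::string& key, int* value) {
        for (size_t g = 0; g < tables_.size(); ++g) {
          std::map<std::string, int>::iterator it = tables_[g].find(key);
          if (it == tables_[g].end()) continue;
          *value = it->second;
          if (g != 0) {  // promote hits so hot entries survive aging
            tables_[0][key] = it->second;
            tables_[g].erase(it);
          }
          return true;
        }
        return false;
      }

      void Put(const std::string& key, int value) { tables_[0][key] = value; }

      // Shift every generation down one slot, dropping the oldest.
      void Age() {
        for (size_t g = tables_.size() - 1; g > 0; --g)
          tables_[g].swap(tables_[g - 1]);
        tables_[0].clear();
      }

     private:
      std::vector<std::map<std::string, int> > tables_;
    };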
diff --git a/src/compiler.cc b/src/compiler.cc
index fe61571..b7aaedf 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -32,7 +32,7 @@
#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
-#include "full-codegen.h"
+#include "fast-codegen.h"
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
@@ -42,6 +42,29 @@
namespace internal {
+class CodeGenSelector: public AstVisitor {
+ public:
+ enum CodeGenTag { NORMAL, FAST };
+
+ CodeGenSelector() : has_supported_syntax_(true) {}
+
+ CodeGenTag Select(FunctionLiteral* fun);
+
+ private:
+ void VisitDeclarations(ZoneList<Declaration*>* decls);
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ bool has_supported_syntax_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
+};
+
+
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
@@ -94,11 +117,12 @@
!FLAG_always_fast_compiler) {
if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
} else {
- FullCodeGenSyntaxChecker checker;
- checker.Check(literal);
- if (checker.has_supported_syntax()) {
- return FullCodeGenerator::MakeCode(literal, script, is_eval);
+ CodeGenSelector selector;
+ CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+ if (code_gen == CodeGenSelector::FAST) {
+ return FastCodeGenerator::MakeCode(literal, script, is_eval);
}
+ ASSERT(code_gen == CodeGenSelector::NORMAL);
}
}
return CodeGenerator::MakeCode(literal, script, is_eval);
@@ -469,12 +493,11 @@
// Generate code and return it.
bool is_compiled = false;
- if (FLAG_always_fast_compiler ||
- (FLAG_fast_compiler && literal->try_fast_codegen())) {
- FullCodeGenSyntaxChecker checker;
- checker.Check(literal);
- if (checker.has_supported_syntax()) {
- code = FullCodeGenerator::MakeCode(literal,
+ if (FLAG_fast_compiler && literal->try_fast_codegen()) {
+ CodeGenSelector selector;
+ CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+ if (code_gen == CodeGenSelector::FAST) {
+ code = FastCodeGenerator::MakeCode(literal,
script,
false); // Not eval.
is_compiled = true;
@@ -548,4 +571,418 @@
}
+CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
+ Scope* scope = fun->scope();
+
+ if (scope->num_heap_slots() > 0) {
+ // We support functions with a local context if they do not have
+ // parameters that need to be copied into the context.
+ for (int i = 0, len = scope->num_parameters(); i < len; i++) {
+ Slot* slot = scope->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ if (FLAG_trace_bailout) {
+ PrintF("Function has context-allocated parameters.\n");
+ }
+ return NORMAL;
+ }
+ }
+ }
+
+ has_supported_syntax_ = true;
+ VisitDeclarations(scope->declarations());
+ if (!has_supported_syntax_) return NORMAL;
+
+ VisitStatements(fun->body());
+ return has_supported_syntax_ ? FAST : NORMAL;
+}
+
+
+#define BAILOUT(reason) \
+ do { \
+ if (FLAG_trace_bailout) { \
+ PrintF("%s\n", reason); \
+ } \
+ has_supported_syntax_ = false; \
+ return; \
+ } while (false)
+
+
+#define CHECK_BAILOUT \
+ do { \
+ if (!has_supported_syntax_) return; \
+ } while (false)
+
+
+void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); i++) {
+ Visit(decls->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitDeclaration(Declaration* decl) {
+ Property* prop = decl->proxy()->AsProperty();
+ if (prop != NULL) {
+ Visit(prop->obj());
+ Visit(prop->key());
+ }
+
+ if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ }
+}
+
+
+void CodeGenSelector::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {}
+
+
+void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
+ Visit(stmt->condition());
+ CHECK_BAILOUT;
+ Visit(stmt->then_statement());
+ CHECK_BAILOUT;
+ Visit(stmt->else_statement());
+}
+
+
+void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {}
+
+
+void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {}
+
+
+void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {}
+
+
+void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
+ BAILOUT("SwitchStatement");
+}
+
+
+void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ Visit(stmt->body());
+}
+
+
+void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ Visit(stmt->body());
+}
+
+
+void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
+ BAILOUT("ForStatement");
+}
+
+
+void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
+ BAILOUT("ForInStatement");
+}
+
+
+void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->catch_block());
+}
+
+
+void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->finally_block());
+}
+
+
+void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {}
+
+
+void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {}
+
+
+void CodeGenSelector::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void CodeGenSelector::VisitConditional(Conditional* expr) {
+ Visit(expr->condition());
+ CHECK_BAILOUT;
+ Visit(expr->then_expression());
+ CHECK_BAILOUT;
+ Visit(expr->else_expression());
+}
+
+
+void CodeGenSelector::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
+ Variable* var = expr->var();
+ if (!var->is_global()) {
+ Slot* slot = var->slot();
+ if (slot != NULL) {
+ Slot::Type type = slot->type();
+ // When LOOKUP slots are enabled, some currently dead code
+ // implementing unary typeof will become live.
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
+ } else {
+ // If not global or a slot, it is a parameter rewritten to an explicit
+ // property reference on the (shadow) arguments object.
+#ifdef DEBUG
+ Property* property = var->AsProperty();
+ ASSERT_NOT_NULL(property);
+ Variable* object = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object);
+ ASSERT_NOT_NULL(object->slot());
+ ASSERT_NOT_NULL(property->key()->AsLiteral());
+ ASSERT(property->key()->AsLiteral()->handle()->IsSmi());
+#endif
+ }
+ }
+}
+
+
+void CodeGenSelector::VisitLiteral(Literal* expr) {}
+
+
+void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {}
+
+
+void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+ for (int i = 0, len = properties->length(); i < len; i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (property->IsCompileTimeValue()) continue;
+ Visit(property->key());
+ CHECK_BAILOUT;
+ Visit(property->value());
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ if (subexpr->AsLiteral() != NULL) continue;
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ Visit(subexpr);
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ Visit(expr->key());
+ CHECK_BAILOUT;
+ Visit(expr->value());
+}
+
+
+void CodeGenSelector::VisitAssignment(Assignment* expr) {
+ // We support plain non-compound assignments to properties, parameters and
+ // non-context (stack-allocated) locals, and global variables.
+ Token::Value op = expr->op();
+ if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ if (var->mode() == Variable::CONST) {
+ BAILOUT("Assignment to const");
+ }
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
+ }
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // This is a throw reference error.
+ BAILOUT("non-variable/non-property assignment");
+ }
+
+ Visit(expr->value());
+}
+
+
+void CodeGenSelector::VisitThrow(Throw* expr) {
+ Visit(expr->exception());
+}
+
+
+void CodeGenSelector::VisitProperty(Property* expr) {
+ Visit(expr->obj());
+ CHECK_BAILOUT;
+ Visit(expr->key());
+}
+
+
+void CodeGenSelector::VisitCall(Call* expr) {
+ Expression* fun = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ // Check for supported calls
+ if (var != NULL && var->is_possibly_eval()) {
+ BAILOUT("call to the identifier 'eval'");
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Calls to global variables are supported.
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ BAILOUT("call to a lookup slot");
+ } else if (fun->AsProperty() != NULL) {
+ Property* prop = fun->AsProperty();
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // Otherwise the call is supported if the function expression is.
+ Visit(fun);
+ }
+ // Check all arguments to the call.
+ for (int i = 0; i < args->length(); i++) {
+ Visit(args->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitCallNew(CallNew* expr) {
+ Visit(expr->expression());
+ CHECK_BAILOUT;
+ ZoneList<Expression*>* args = expr->arguments();
+ // Check all arguments to the call
+ for (int i = 0; i < args->length(); i++) {
+ Visit(args->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
+ // Check for inline runtime call
+ if (expr->name()->Get(0) == '_' &&
+ CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+ BAILOUT("inlined runtime call");
+ }
+ // Check all arguments to the call. (Relies on TEMP meaning STACK.)
+ for (int i = 0; i < expr->arguments()->length(); i++) {
+ Visit(expr->arguments()->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID:
+ case Token::NOT:
+ case Token::TYPEOF:
+ Visit(expr->expression());
+ break;
+ case Token::BIT_NOT:
+ BAILOUT("UnaryOperataion: BIT_NOT");
+ case Token::DELETE:
+ BAILOUT("UnaryOperataion: DELETE");
+ default:
+ BAILOUT("UnaryOperataion");
+ }
+}
+
+
+void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->expression()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("CountOperation with lookup slot");
+ }
+ }
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // This is a throw reference error.
+ BAILOUT("CountOperation non-variable/non-property expression");
+ }
+}
+
+
+void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
+void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
+void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
} } // namespace v8::internal
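The CodeGenSelector added above is a conservative syntax analysis in its most common shape: one boolean, a BAILOUT macro that records a traced reason, and CHECK_BAILOUT to stop walking as soon as the answer is known. A minimal self-contained sketch of the pattern over a hypothetical AST (not V8's node types):

    #include <cstdio>
    #include <vector>

    // Hypothetical mini-AST standing in for V8's node hierarchy.
    struct Node {
      virtual ~Node() {}
      virtual bool IsSupported() const { return true; }
      std::vector<Node*> children;
    };
    struct SwitchNode : public Node {
      virtual bool IsSupported() const { return false; }
    };

    class SupportedSyntaxChecker {
     public:
      bool Check(Node* root) {
        has_supported_syntax_ = true;
        Visit(root);
        return has_supported_syntax_;
      }

     private:
      void Visit(Node* node) {
        if (!node->IsSupported()) {       // BAILOUT equivalent
          std::printf("bailout: unsupported construct\n");
          has_supported_syntax_ = false;
          return;
        }
        for (size_t i = 0; i < node->children.size(); ++i) {
          Visit(node->children[i]);
          if (!has_supported_syntax_) return;  // CHECK_BAILOUT equivalent
        }
      }

      bool has_supported_syntax_;
    };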
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 14d8c88..04fde1f 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1704,7 +1704,7 @@
if (global) {
// Evaluate in the global context.
response.body =
- this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
+        this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
return;
}
diff --git a/src/debug.cc b/src/debug.cc
index fc809c5..34b3a6d 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1695,7 +1695,9 @@
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
count++;
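The hunk above trades the sentinel-style loop (next() returns NULL when exhausted) for the older has_next()/next() pair. Both idioms visit the same objects; a sketch contrasting them over a hypothetical fixed-size iterator:

    #include <cstddef>

    struct Obj { bool is_script; };

    // Hypothetical iterators over an array, just to contrast loop shapes.
    struct HeapIteratorOld {
      Obj* objs; int n; int i;
      bool has_next() { return i < n; }
      Obj* next() { return &objs[i++]; }
    };
    struct HeapIteratorNew {
      Obj* objs; int n; int i;
      Obj* next() { return i < n ? &objs[i++] : NULL; }
    };

    // Old style, restored by this revert: test, then fetch.
    int CountScriptsOld(HeapIteratorOld it) {
      int count = 0;
      while (it.has_next()) {
        if (it.next()->is_script) count++;
      }
      return count;
    }

    // Newer style, reverted away: fetch until the NULL sentinel.
    int CountScriptsNew(HeapIteratorNew it) {
      int count = 0;
      for (Obj* obj = it.next(); obj != NULL; obj = it.next()) {
        if (obj->is_script) count++;
      }
      return count;
    }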
diff --git a/src/dtoa-config.c b/src/dtoa-config.c
index 9c5ee33..a1acd2d 100644
--- a/src/dtoa-config.c
+++ b/src/dtoa-config.c
@@ -38,8 +38,7 @@
*/
#if !(defined(__APPLE__) && defined(__MACH__)) && \
- !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
- !defined(__sun)
+ !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
#include <endian.h>
#endif
#include <math.h>
@@ -48,7 +47,7 @@
/* The floating point word order on ARM is big endian when floating point
* emulation is used, even if the byte order is little endian */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
- !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__sun) && \
+ !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
__FLOAT_WORD_ORDER == __BIG_ENDIAN
#define IEEE_MC68k
#else
@@ -57,7 +56,7 @@
#define __MATH_H__
#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__) || \
- defined(__OpenBSD__) || defined(__sun)
+ defined(__OpenBSD__)
/* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
* name of strtod. If it's included after strtod is redefined as
* gay_strtod, it will mangle the name of gay_strtod, which is
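The __FLOAT_WORD_ORDER test above picks between dtoa's IEEE_8087 and IEEE_MC68k layouts at preprocessing time. The same property can be probed at runtime, which makes the ARM quirk concrete: with floating-point emulation the two 32-bit halves of a double are stored big-endian even when byte order is little-endian. A small standalone probe, separate from the patch:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 1.0;  // bit pattern 0x3FF0000000000000
      uint32_t words[2];
      std::memcpy(words, &d, sizeof(words));
      // IEEE_8087: low word first, so the exponent lands in words[1].
      // IEEE_MC68k: high word first, so the exponent lands in words[0].
      std::printf("%s\n",
                  words[1] == 0x3FF00000u ? "IEEE_8087" : "IEEE_MC68k");
      return 0;
    }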
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
new file mode 100644
index 0000000..e90a44e
--- /dev/null
+++ b/src/fast-codegen.cc
@@ -0,0 +1,707 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "fast-codegen.h"
+#include "stub-cache.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval) {
+ CodeGenerator::MakeCodePrologue(fun);
+ const int kInitialBufferSize = 4 * KB;
+ MacroAssembler masm(NULL, kInitialBufferSize);
+ FastCodeGenerator cgen(&masm, script, is_eval);
+ cgen.Generate(fun);
+ if (cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+ return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+int FastCodeGenerator::SlotOffset(Slot* slot) {
+ ASSERT(slot != NULL);
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -slot->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+ break;
+ case Slot::LOCAL:
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ break;
+ case Slot::CONTEXT:
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ return offset;
+}
+
+
+void FastCodeGenerator::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ int length = declarations->length();
+ int globals = 0;
+ for (int i = 0; i < length; i++) {
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile
+ // time, we need to "declare" it at runtime to make sure it
+ // actually exists in the local context.
+ if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+ VisitDeclaration(decl);
+ } else {
+ // Count global variables and functions for later processing
+ globals++;
+ }
+ }
+
+ // Compute array of global variable and function declarations.
+ // Do nothing in case of no declared global functions or variables.
+ if (globals > 0) {
+ Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ for (int j = 0, i = 0; i < length; i++) {
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
+ Slot* slot = var->slot();
+
+ if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+ array->set(j++, *(var->name()));
+ if (decl->fun() == NULL) {
+ if (var->mode() == Variable::CONST) {
+          // In case this is a const property, use the hole.
+ array->set_the_hole(j++);
+ } else {
+ array->set_undefined(j++);
+ }
+ } else {
+ Handle<JSFunction> function =
+ Compiler::BuildBoilerplate(decl->fun(), script_, this);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ array->set(j++, *function);
+ }
+ }
+ }
+ // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+ DeclareGlobals(array);
+ }
+}
+
+
+void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, fun->start_position());
+ }
+}
+
+
+void FastCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, fun->end_position());
+ }
+}
+
+
+void FastCodeGenerator::SetStatementPosition(Statement* stmt) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ }
+}
+
+
+void FastCodeGenerator::SetStatementPosition(int pos) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, pos);
+ }
+}
+
+
+void FastCodeGenerator::SetSourcePosition(int pos) {
+ if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ masm_->RecordPosition(pos);
+ }
+}
+
+
+void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
+ Label eval_right, done;
+
+ // Set up the appropriate context for the left subexpression based
+ // on the operation and our own context. Initially assume we can
+ // inherit both true and false labels from our context.
+ if (expr->op() == Token::OR) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ VisitForControl(expr->left(), &done, &eval_right);
+ break;
+ case Expression::kValue:
+ VisitForValueControl(expr->left(),
+ location_,
+ &done,
+ &eval_right);
+ break;
+ case Expression::kTest:
+ VisitForControl(expr->left(), true_label_, &eval_right);
+ break;
+ case Expression::kValueTest:
+ VisitForValueControl(expr->left(),
+ location_,
+ true_label_,
+ &eval_right);
+ break;
+ case Expression::kTestValue:
+ VisitForControl(expr->left(), true_label_, &eval_right);
+ break;
+ }
+ } else {
+ ASSERT_EQ(Token::AND, expr->op());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ VisitForControl(expr->left(), &eval_right, &done);
+ break;
+ case Expression::kValue:
+ VisitForControlValue(expr->left(),
+ location_,
+ &eval_right,
+ &done);
+ break;
+ case Expression::kTest:
+ VisitForControl(expr->left(), &eval_right, false_label_);
+ break;
+ case Expression::kValueTest:
+ VisitForControl(expr->left(), &eval_right, false_label_);
+ break;
+ case Expression::kTestValue:
+ VisitForControlValue(expr->left(),
+ location_,
+ &eval_right,
+ false_label_);
+ break;
+ }
+ }
+
+ __ bind(&eval_right);
+ Visit(expr->right());
+
+ __ bind(&done);
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+ Comment cmnt(masm_, "[ Block");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+ VisitStatements(stmt->statements());
+ __ bind(nested_statement.break_target());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ SetStatementPosition(stmt);
+ VisitForEffect(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+ Comment cmnt(masm_, "[ EmptyStatement");
+ SetStatementPosition(stmt);
+}
+
+
+void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+ Comment cmnt(masm_, "[ IfStatement");
+ SetStatementPosition(stmt);
+ Label then_part, else_part, done;
+
+ // Do not worry about optimizing for empty then or else bodies.
+ VisitForControl(stmt->condition(), &then_part, &else_part);
+
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ __ jmp(&done);
+
+ __ bind(&else_part);
+ Visit(stmt->else_statement());
+
+ __ bind(&done);
+}
+
+
+void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ Comment cmnt(masm_, "[ ContinueStatement");
+ SetStatementPosition(stmt);
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (!current->IsContinueTarget(stmt->target())) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ Iteration* loop = current->AsIteration();
+ __ jmp(loop->continue_target());
+}
+
+
+void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ Comment cmnt(masm_, "[ BreakStatement");
+ SetStatementPosition(stmt);
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (!current->IsBreakTarget(stmt->target())) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ Breakable* target = current->AsBreakable();
+ __ jmp(target->break_target());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
+ Expression* expr = stmt->expression();
+ VisitForValue(expr, kAccumulator);
+
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (current != NULL) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ EmitReturnSequence(stmt->statement_pos());
+}
+
+
+void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ SetStatementPosition(stmt);
+
+ VisitForValue(stmt->expression(), kStack);
+ if (stmt->is_catch_block()) {
+ __ CallRuntime(Runtime::kPushCatchContext, 1);
+ } else {
+ __ CallRuntime(Runtime::kPushContext, 1);
+ }
+ // Both runtime calls return the new context in both the context and the
+ // result registers.
+
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+}
+
+
+void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+ Comment cmnt(masm_, "[ WithExitStatement");
+ SetStatementPosition(stmt);
+
+ // Pop context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+}
+
+
+void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ Comment cmnt(masm_, "[ DoWhileStatement");
+ SetStatementPosition(stmt);
+ Label body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ __ bind(loop_statement.continue_target());
+ SetStatementPosition(stmt->condition_position());
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(loop_statement.break_target());
+
+ decrement_loop_depth();
+}
+
+
+void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+ Comment cmnt(masm_, "[ WhileStatement");
+ SetStatementPosition(stmt);
+ Label body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Emit the test at the bottom of the loop.
+ __ jmp(loop_statement.continue_target());
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ __ bind(loop_statement.continue_target());
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(loop_statement.break_target());
+ decrement_loop_depth();
+}
+
+
+void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Comment cmnt(masm_, "[ TryCatchStatement");
+ SetStatementPosition(stmt);
+ // The try block adds a handler to the exception handler chain
+ // before entering, and removes it again when exiting normally.
+ // If an exception is thrown during execution of the try block,
+ // control is passed to the handler, which also consumes the handler.
+  // At this point, the exception is in a register, and is stored in
+  // the temporary local variable (printed as ".catch-var") before
+ // executing the catch block. The catch block has been rewritten
+ // to introduce a new scope to bind the catch variable and to remove
+ // that scope again afterwards.
+
+ Label try_handler_setup, catch_entry, done;
+ __ Call(&try_handler_setup);
+ // Try handler code, exception in result register.
+
+ // Store exception in local .catch variable before executing catch block.
+ {
+ // The catch variable is *always* a variable proxy for a local variable.
+ Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(catch_var);
+ Slot* variable_slot = catch_var->slot();
+ ASSERT_NOT_NULL(variable_slot);
+ ASSERT_EQ(Slot::LOCAL, variable_slot->type());
+ StoreToFrameField(SlotOffset(variable_slot), result_register());
+ }
+
+ Visit(stmt->catch_block());
+ __ jmp(&done);
+
+ // Try block code. Sets up the exception handler chain.
+ __ bind(&try_handler_setup);
+ {
+ TryCatch try_block(this, &catch_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ __ bind(&done);
+}
+
+
+void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ Comment cmnt(masm_, "[ TryFinallyStatement");
+ SetStatementPosition(stmt);
+ // Try finally is compiled by setting up a try-handler on the stack while
+ // executing the try body, and removing it again afterwards.
+ //
+ // The try-finally construct can enter the finally block in three ways:
+ // 1. By exiting the try-block normally. This removes the try-handler and
+ // calls the finally block code before continuing.
+ // 2. By exiting the try-block with a function-local control flow transfer
+  //     (break/continue/return). The site of, e.g., the break removes the
+ // try handler and calls the finally block code before continuing
+ // its outward control transfer.
+  //  3. By exiting the try-block with a thrown exception.
+ // This can happen in nested function calls. It traverses the try-handler
+ // chain and consumes the try-handler entry before jumping to the
+ // handler code. The handler code then calls the finally-block before
+ // rethrowing the exception.
+ //
+ // The finally block must assume a return address on top of the stack
+ // (or in the link register on ARM chips) and a value (return value or
+ // exception) in the result register (rax/eax/r0), both of which must
+ // be preserved. The return address isn't GC-safe, so it should be
+ // cooked before GC.
+ Label finally_entry;
+ Label try_handler_setup;
+
+  // Set up the try-handler chain. Jump to the try-handler setup and
+  // try-block code, using a call to put the try-handler address on the
+  // stack.
+ __ Call(&try_handler_setup);
+ // Try handler code. Return address of call is pushed on handler stack.
+ {
+ // This code is only executed during stack-handler traversal when an
+    // exception is thrown. The exception is in the result register, which
+ // is retained by the finally block.
+ // Call the finally block and then rethrow the exception.
+ __ Call(&finally_entry);
+ __ push(result_register());
+ __ CallRuntime(Runtime::kReThrow, 1);
+ }
+
+ __ bind(&finally_entry);
+ {
+ // Finally block implementation.
+ Finally finally_block(this);
+ EnterFinallyBlock();
+ Visit(stmt->finally_block());
+ ExitFinallyBlock(); // Return to the calling code.
+ }
+
+ __ bind(&try_handler_setup);
+ {
+    // Set up try handler (stack pointer registers).
+ TryFinally try_block(this, &finally_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ // Execute the finally block on the way out.
+ __ Call(&finally_entry);
+}
+
+
+void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ SetStatementPosition(stmt);
+ __ CallRuntime(Runtime::kDebugBreak, 0);
+ // Ignore the return value.
+#endif
+}
+
+
+void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitConditional(Conditional* expr) {
+ Comment cmnt(masm_, "[ Conditional");
+ Label true_case, false_case, done;
+ VisitForControl(expr->condition(), &true_case, &false_case);
+
+ __ bind(&true_case);
+ Visit(expr->then_expression());
+ // If control flow falls through Visit, jump to done.
+ if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ __ jmp(&done);
+ }
+
+ __ bind(&false_case);
+ Visit(expr->else_expression());
+ // If control flow falls through Visit, merge it with true case here.
+ if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ __ bind(&done);
+ }
+}
+
+
+void FastCodeGenerator::VisitSlot(Slot* expr) {
+ // Slots do not appear directly in the AST.
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+ Comment cmnt(masm_, "[ Literal");
+ Apply(context_, expr);
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ break;
+ case KEYED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ break;
+ }
+
+ // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kStack;
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+ Expression::kValue);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ }
+ location_ = saved_location;
+ }
+
+ // Evaluate RHS expression.
+ Expression* rhs = expr->value();
+ VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: apply the operator.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ location_ = saved_location;
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ context_);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ VisitForValue(expr->key(), kStack);
+ VisitForValue(expr->value(), kStack);
+ // Create catch extension object.
+ __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ Apply(context_, result_register());
+}
+
+
+void FastCodeGenerator::VisitThrow(Throw* expr) {
+ Comment cmnt(masm_, "[ Throw");
+ VisitForValue(expr->exception(), kStack);
+ __ CallRuntime(Runtime::kThrow, 1);
+ // Never returns here.
+}
+
+
+int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
+ // The macros used here must preserve the result register.
+ __ Drop(stack_depth);
+ __ PopTryHandler();
+ __ Call(finally_entry_);
+ return 0;
+}
+
+
+int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
+ // The macros used here must preserve the result register.
+ __ Drop(stack_depth);
+ __ PopTryHandler();
+ return 0;
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
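One pattern in the new file deserves a callout: break, continue, and return all unwind through the same NestedStatement::Exit protocol, each enclosing construct reporting how many stack slots it leaves behind so a single Drop discards them all at once. A simplified model of that accounting (the real Exit implementations also pop try handlers and call finally blocks):

    #include <cstdio>

    class NestedStatement {
     public:
      explicit NestedStatement(NestedStatement* outer) : outer_(outer) {}
      virtual ~NestedStatement() {}
      NestedStatement* outer() const { return outer_; }
      virtual bool IsBreakTarget() const { return false; }
      // Returns the accumulated stack depth; overriders add the slots
      // they own on the expression stack.
      virtual int Exit(int stack_depth) { return stack_depth; }

     private:
      NestedStatement* outer_;
    };

    class WithScope : public NestedStatement {
     public:
      explicit WithScope(NestedStatement* outer) : NestedStatement(outer) {}
      virtual int Exit(int stack_depth) { return stack_depth + 1; }
    };

    class LoopScope : public NestedStatement {
     public:
      explicit LoopScope(NestedStatement* outer) : NestedStatement(outer) {}
      virtual bool IsBreakTarget() const { return true; }
    };

    // Mirrors VisitBreakStatement: sum slots walking outward, Drop once.
    void EmitBreak(NestedStatement* innermost) {
      int stack_depth = 0;
      NestedStatement* current = innermost;
      while (!current->IsBreakTarget()) {
        stack_depth = current->Exit(stack_depth);
        current = current->outer();
      }
      std::printf("Drop(%d); jmp break_target\n", stack_depth);
    }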
diff --git a/src/full-codegen.h b/src/fast-codegen.h
similarity index 92%
rename from src/full-codegen.h
rename to src/fast-codegen.h
index 35ed25f..c26e0f3 100644
--- a/src/full-codegen.h
+++ b/src/fast-codegen.h
@@ -35,35 +35,12 @@
namespace v8 {
namespace internal {
-class FullCodeGenSyntaxChecker: public AstVisitor {
- public:
- FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
-
- void Check(FunctionLiteral* fun);
-
- bool has_supported_syntax() { return has_supported_syntax_; }
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool has_supported_syntax_;
-
- DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
-};
-
-
// -----------------------------------------------------------------------------
-// Full code generator.
+// Fast code generator.
-class FullCodeGenerator: public AstVisitor {
+class FastCodeGenerator: public AstVisitor {
public:
- FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+ FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
: masm_(masm),
function_(NULL),
script_(script),
@@ -91,7 +68,7 @@
class NestedStatement BASE_EMBEDDED {
public:
- explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
+ explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
// Link into codegen's nesting stack.
previous_ = codegen->nesting_stack_;
codegen->nesting_stack_ = this;
@@ -129,14 +106,14 @@
protected:
MacroAssembler* masm() { return codegen_->masm(); }
private:
- FullCodeGenerator* codegen_;
+ FastCodeGenerator* codegen_;
NestedStatement* previous_;
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
};
class Breakable : public NestedStatement {
public:
- Breakable(FullCodeGenerator* codegen,
+ Breakable(FastCodeGenerator* codegen,
BreakableStatement* break_target)
: NestedStatement(codegen),
target_(break_target) {}
@@ -155,7 +132,7 @@
class Iteration : public Breakable {
public:
- Iteration(FullCodeGenerator* codegen,
+ Iteration(FastCodeGenerator* codegen,
IterationStatement* iteration_statement)
: Breakable(codegen, iteration_statement) {}
virtual ~Iteration() {}
@@ -172,7 +149,7 @@
// The environment inside the try block of a try/catch statement.
class TryCatch : public NestedStatement {
public:
- explicit TryCatch(FullCodeGenerator* codegen, Label* catch_entry)
+ explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
: NestedStatement(codegen), catch_entry_(catch_entry) { }
virtual ~TryCatch() {}
virtual TryCatch* AsTryCatch() { return this; }
@@ -186,7 +163,7 @@
// The environment inside the try block of a try/finally statement.
class TryFinally : public NestedStatement {
public:
- explicit TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
+ explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
: NestedStatement(codegen), finally_entry_(finally_entry) { }
virtual ~TryFinally() {}
virtual TryFinally* AsTryFinally() { return this; }
@@ -202,7 +179,7 @@
// the block's parameters from the stack.
class Finally : public NestedStatement {
public:
- explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
+ explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
virtual ~Finally() {}
virtual Finally* AsFinally() { return this; }
virtual int Exit(int stack_depth) {
@@ -219,7 +196,7 @@
// the block's temporary storage from the stack.
class ForIn : public Iteration {
public:
- ForIn(FullCodeGenerator* codegen,
+ ForIn(FastCodeGenerator* codegen,
ForInStatement* statement)
: Iteration(codegen, statement) { }
virtual ~ForIn() {}
@@ -245,10 +222,7 @@
// or on top of the stack) into the result expected according to an
// expression context.
void Apply(Expression::Context context, Register reg);
-
- // Slot cannot have type Slot::LOOKUP.
void Apply(Expression::Context context, Slot* slot);
-
void Apply(Expression::Context context, Literal* lit);
void ApplyTOS(Expression::Context context);
@@ -436,7 +410,7 @@
friend class NestedStatement;
- DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
+ DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 90f9dda..5c0aa0c 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -201,11 +201,6 @@
DEFINE_bool(use_big_map_space, true,
"Use big map space, but don't compact if it grew too big.")
-DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
- "Maximum number of pages in map space which still allows to encode "
- "forwarding pointers. That's actually a constant, but it's useful "
- "to control it with a flag for better testing.")
-
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
diff --git a/src/frames.h b/src/frames.h
index 19860ad..024065a 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -607,12 +607,11 @@
void Advance();
void Reset();
+ private:
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
}
-
- private:
bool IsValidStackAddress(Address addr) const {
return IsWithinBounds(low_bound_, high_bound_, addr);
}
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
deleted file mode 100644
index 0477ab9..0000000
--- a/src/full-codegen.cc
+++ /dev/null
@@ -1,1168 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "full-codegen.h"
-#include "stub-cache.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
-
- if (scope->num_heap_slots() > 0) {
- // We support functions with a local context if they do not have
- // parameters that need to be copied into the context.
- for (int i = 0, len = scope->num_parameters(); i < len; i++) {
- Slot* slot = scope->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- BAILOUT("Function has context-allocated parameters.");
- }
- }
- }
-
- VisitDeclarations(scope->declarations());
- CHECK_BAILOUT;
-
- VisitStatements(fun->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDeclarations(
- ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); i++) {
- Visit(decls->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
- Property* prop = decl->proxy()->AsProperty();
- if (prop != NULL) {
- Visit(prop->obj());
- Visit(prop->key());
- }
-
- if (decl->fun() != NULL) {
- Visit(decl->fun());
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
- Visit(stmt->condition());
- CHECK_BAILOUT;
- Visit(stmt->then_statement());
- CHECK_BAILOUT;
- Visit(stmt->else_statement());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
- if (!FLAG_always_fast_compiler) BAILOUT("ForStatement");
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- CHECK_BAILOUT;
- }
- if (stmt->cond() != NULL) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- }
- Visit(stmt->body());
- if (stmt->next() != NULL) {
- CHECK_BAILOUT;
- Visit(stmt->next());
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->catch_block());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->finally_block());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
- Visit(expr->condition());
- CHECK_BAILOUT;
- Visit(expr->then_expression());
- CHECK_BAILOUT;
- Visit(expr->else_expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-
- for (int i = 0, len = properties->length(); i < len; i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (property->IsCompileTimeValue()) continue;
- Visit(property->key());
- CHECK_BAILOUT;
- Visit(property->value());
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- if (subexpr->AsLiteral() != NULL) continue;
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- Visit(subexpr);
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- Visit(expr->key());
- CHECK_BAILOUT;
- Visit(expr->value());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
- Token::Value op = expr->op();
- if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
- // All other variables are supported.
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This case throws a reference error at runtime.
- BAILOUT("non-variable/non-property assignment");
- }
-
- Visit(expr->value());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
- Visit(expr->exception());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
- Visit(expr->obj());
- CHECK_BAILOUT;
- Visit(expr->key());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- // Check for supported calls
- if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("call to the identifier 'eval'");
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Calls to global variables are supported.
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- BAILOUT("call to a lookup slot");
- } else if (fun->AsProperty() != NULL) {
- Property* prop = fun->AsProperty();
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // Otherwise the call is supported if the function expression is.
- Visit(fun);
- }
- // Check all arguments to the call.
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
- Visit(expr->expression());
- CHECK_BAILOUT;
- ZoneList<Expression*>* args = expr->arguments();
- // Check all arguments to the call
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
- // Check for inline runtime call
- if (expr->name()->Get(0) == '_' &&
- CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("inlined runtime call");
- }
- // Check all arguments to the call. (Relies on TEMP meaning STACK.)
- for (int i = 0; i < expr->arguments()->length(); i++) {
- Visit(expr->arguments()->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::ADD:
- case Token::NOT:
- case Token::TYPEOF:
- case Token::VOID:
- Visit(expr->expression());
- break;
- case Token::BIT_NOT:
- BAILOUT("UnaryOperation: BIT_NOT");
- case Token::DELETE:
- BAILOUT("UnaryOperation: DELETE");
- case Token::SUB:
- BAILOUT("UnaryOperation: SUB");
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- Property* prop = expr->expression()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("CountOperation with lookup slot");
- }
- }
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This case throws a reference error at runtime.
- BAILOUT("CountOperation non-variable/non-property expression");
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
- // Supported.
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval) {
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- int len = String::cast(script->source())->length();
- Counters::total_full_codegen_source_size.Increment(len);
- }
- CodeGenerator::MakeCodePrologue(fun);
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(NULL, kInitialBufferSize);
- FullCodeGenerator cgen(&masm, script, is_eval);
- cgen.Generate(fun);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
-}
-
-
-int FullCodeGenerator::SlotOffset(Slot* slot) {
- ASSERT(slot != NULL);
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -slot->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- switch (slot->type()) {
- case Slot::PARAMETER:
- offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
- break;
- case Slot::LOCAL:
- offset += JavaScriptFrameConstants::kLocal0Offset;
- break;
- case Slot::CONTEXT:
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- return offset;
-}
-
-
-void FullCodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- int length = declarations->length();
- int globals = 0;
- for (int i = 0; i < length; i++) {
- Declaration* decl = declarations->at(i);
- Variable* var = decl->proxy()->var();
- Slot* slot = var->slot();
-
- // If it was not possible to allocate the variable at compile
- // time, we need to "declare" it at runtime to make sure it
- // actually exists in the local context.
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- VisitDeclaration(decl);
- } else {
- // Count global variables and functions for later processing
- globals++;
- }
- }
-
- // Compute array of global variable and function declarations.
- // Do nothing in case of no declared global functions or variables.
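- // The array is laid out pairwise as [name_0, value_0, name_1,
- // value_1, ...], where value_i is the hole for a const declaration,
- // undefined for a plain variable, and the compiled boilerplate for a
- // function declaration.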
- if (globals > 0) {
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* decl = declarations->at(i);
- Variable* var = decl->proxy()->var();
- Slot* slot = var->slot();
-
- if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
- array->set(j++, *(var->name()));
- if (decl->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
- } else {
- array->set_undefined(j++);
- }
- } else {
- Handle<JSFunction> function =
- Compiler::BuildBoilerplate(decl->fun(), script_, this);
- // Check for stack-overflow exception.
- if (HasStackOverflow()) return;
- array->set(j++, *function);
- }
- }
- }
- // Invoke the platform-dependent code generator to do the actual
- // declaration of the global variables and functions.
- DeclareGlobals(array);
- }
-}
-
-
-void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
- }
-}
-
-
-void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->end_position());
- }
-}
-
-
-void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
- }
-}
-
-
-void FullCodeGenerator::SetStatementPosition(int pos) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, pos);
- }
-}
-
-
-void FullCodeGenerator::SetSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
- masm_->RecordPosition(pos);
- }
-}
-
-
-void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
- Label eval_right, done;
-
- // Set up the appropriate context for the left subexpression based
- // on the operation and our own context. Initially assume we can
- // inherit both true and false labels from our context.
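- // For 'a || b' in a value context, for instance, a true 'a' is
- // already the result and control jumps straight to done; only a
- // false 'a' falls through into eval_right to evaluate 'b'.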
- if (expr->op() == Token::OR) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- VisitForControl(expr->left(), &done, &eval_right);
- break;
- case Expression::kValue:
- VisitForValueControl(expr->left(),
- location_,
- &done,
- &eval_right);
- break;
- case Expression::kTest:
- VisitForControl(expr->left(), true_label_, &eval_right);
- break;
- case Expression::kValueTest:
- VisitForValueControl(expr->left(),
- location_,
- true_label_,
- &eval_right);
- break;
- case Expression::kTestValue:
- VisitForControl(expr->left(), true_label_, &eval_right);
- break;
- }
- } else {
- ASSERT_EQ(Token::AND, expr->op());
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- VisitForControl(expr->left(), &eval_right, &done);
- break;
- case Expression::kValue:
- VisitForControlValue(expr->left(),
- location_,
- &eval_right,
- &done);
- break;
- case Expression::kTest:
- VisitForControl(expr->left(), &eval_right, false_label_);
- break;
- case Expression::kValueTest:
- VisitForControl(expr->left(), &eval_right, false_label_);
- break;
- case Expression::kTestValue:
- VisitForControlValue(expr->left(),
- location_,
- &eval_right,
- false_label_);
- break;
- }
- }
-
- __ bind(&eval_right);
- Visit(expr->right());
-
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
- __ bind(nested_statement.break_target());
-}
-
-
-void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- VisitForEffect(stmt->expression());
-}
-
-
-void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- Comment cmnt(masm_, "[ EmptyStatement");
- SetStatementPosition(stmt);
-}
-
-
-void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- Comment cmnt(masm_, "[ IfStatement");
- SetStatementPosition(stmt);
- Label then_part, else_part, done;
-
- // Do not worry about optimizing for empty then or else bodies.
- VisitForControl(stmt->condition(), &then_part, &else_part);
-
- __ bind(&then_part);
- Visit(stmt->then_statement());
- __ jmp(&done);
-
- __ bind(&else_part);
- Visit(stmt->else_statement());
-
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- Comment cmnt(masm_, "[ ContinueStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (!current->IsContinueTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- Iteration* loop = current->AsIteration();
- __ jmp(loop->continue_target());
-}
-
-
-void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- Comment cmnt(masm_, "[ BreakStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (!current->IsBreakTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- Breakable* target = current->AsBreakable();
- __ jmp(target->break_target());
-}
-
-
-void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
- Expression* expr = stmt->expression();
- VisitForValue(expr, kAccumulator);
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (current != NULL) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
-
- EmitReturnSequence(stmt->statement_pos());
-}
-
-
-void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Comment cmnt(masm_, "[ WithEnterStatement");
- SetStatementPosition(stmt);
-
- VisitForValue(stmt->expression(), kStack);
- if (stmt->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- __ CallRuntime(Runtime::kPushContext, 1);
- }
- // Both runtime calls return the new context in both the context and the
- // result registers.
-
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
-
-
-void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- Comment cmnt(masm_, "[ WithExitStatement");
- SetStatementPosition(stmt);
-
- // Pop context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Comment cmnt(masm_, "[ DoWhileStatement");
- SetStatementPosition(stmt);
- Label body, stack_limit_hit, stack_check_success;
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- __ bind(&body);
- Visit(stmt->body());
-
- // Check stack before looping.
- __ StackLimitCheck(&stack_limit_hit);
- __ bind(&stack_check_success);
-
- __ bind(loop_statement.continue_target());
- SetStatementPosition(stmt->condition_position());
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
-
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
-
- __ bind(loop_statement.break_target());
-
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- Comment cmnt(masm_, "[ WhileStatement");
- SetStatementPosition(stmt);
- Label body, stack_limit_hit, stack_check_success;
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // Emit the test at the bottom of the loop.
- __ jmp(loop_statement.continue_target());
-
- __ bind(&body);
- Visit(stmt->body());
-
- __ bind(loop_statement.continue_target());
- // Check stack before looping.
- __ StackLimitCheck(&stack_limit_hit);
- __ bind(&stack_check_success);
-
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
-
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
-
- __ bind(loop_statement.break_target());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
- Comment cmnt(masm_, "[ ForStatement");
- SetStatementPosition(stmt);
- Label test, body, stack_limit_hit, stack_check_success;
-
- Iteration loop_statement(this, stmt);
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- }
-
- increment_loop_depth();
- // Emit the test at the bottom of the loop (even if empty).
- __ jmp(&test);
-
- __ bind(&body);
- Visit(stmt->body());
-
- __ bind(loop_statement.continue_target());
-
- SetStatementPosition(stmt);
- if (stmt->next() != NULL) {
- Visit(stmt->next());
- }
-
- __ bind(&test);
-
- // Check stack before looping.
- __ StackLimitCheck(&stack_limit_hit);
- __ bind(&stack_check_success);
-
- if (stmt->cond() != NULL) {
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
- } else {
- __ jmp(&body);
- }
-
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
-
- __ bind(loop_statement.break_target());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Comment cmnt(masm_, "[ TryCatchStatement");
- SetStatementPosition(stmt);
- // The try block adds a handler to the exception handler chain
- // before entering, and removes it again when exiting normally.
- // If an exception is thrown during execution of the try block,
- // control is passed to the handler, which consumes the handler entry.
- // At this point, the exception is in a register, and we store it in
- // the temporary local variable (prints as ".catch-var") before
- // executing the catch block. The catch block has been rewritten
- // to introduce a new scope to bind the catch variable and to remove
- // that scope again afterwards.
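- //
- // For example, in
- //   try { f(); } catch (e) { g(e); }
- // an exception thrown inside f() arrives in the result register, is
- // stored into .catch-var, and the catch block then runs with e bound
- // to that value.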
-
- Label try_handler_setup, catch_entry, done;
- __ Call(&try_handler_setup);
- // Try handler code, exception in result register.
-
- // Store exception in local .catch variable before executing catch block.
- {
- // The catch variable is *always* a variable proxy for a local variable.
- Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->slot();
- ASSERT_NOT_NULL(variable_slot);
- ASSERT_EQ(Slot::LOCAL, variable_slot->type());
- StoreToFrameField(SlotOffset(variable_slot), result_register());
- }
-
- Visit(stmt->catch_block());
- __ jmp(&done);
-
- // Try block code. Sets up the exception handler chain.
- __ bind(&try_handler_setup);
- {
- TryCatch try_block(this, &catch_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
- Visit(stmt->try_block());
- __ PopTryHandler();
- }
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Comment cmnt(masm_, "[ TryFinallyStatement");
- SetStatementPosition(stmt);
- // Try finally is compiled by setting up a try-handler on the stack while
- // executing the try body, and removing it again afterwards.
- //
- // The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the transfer, e.g. the break,
- // removes the try handler and calls the finally block code before
- // continuing its outward control transfer.
- // 3. By exiting the try-block with a thrown exception.
- // This can happen in nested function calls. It traverses the try-handler
- // chain and consumes the try-handler entry before jumping to the
- // handler code. The handler code then calls the finally-block before
- // rethrowing the exception.
- //
- // The finally block must assume a return address on top of the stack
- // (or in the link register on ARM chips) and a value (return value or
- // exception) in the result register (rax/eax/r0), both of which must
- // be preserved. The return address isn't GC-safe, so it should be
- // cooked before GC.
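- //
- // For example, in
- //   for (;;) { try { if (c) break; f(); } finally { g(); } }
- // the code for g() runs when the try block falls through (case 1),
- // when the break transfers control out (case 2), and when f()
- // throws (case 3).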
- Label finally_entry;
- Label try_handler_setup;
-
- // Set up the try-handler chain: jump to the try-handler setup and the
- // try-block code, using a call to put the try-handler address on the
- // stack.
- __ Call(&try_handler_setup);
- // Try handler code. Return address of call is pushed on handler stack.
- {
- // This code is only executed during stack-handler traversal when an
- // exception is thrown. The exception is in the result register, which
- // is retained by the finally block.
- // Call the finally block and then rethrow the exception.
- __ Call(&finally_entry);
- __ push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
- }
-
- __ bind(&finally_entry);
- {
- // Finally block implementation.
- Finally finally_block(this);
- EnterFinallyBlock();
- Visit(stmt->finally_block());
- ExitFinallyBlock(); // Return to the calling code.
- }
-
- __ bind(&try_handler_setup);
- {
- // Set up the try handler (stack pointer registers).
- TryFinally try_block(this, &finally_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- Visit(stmt->try_block());
- __ PopTryHandler();
- }
- // Execute the finally block on the way out.
- __ Call(&finally_entry);
-}
-
-
-void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Comment cmnt(masm_, "[ DebuggerStatement");
- SetStatementPosition(stmt);
- __ CallRuntime(Runtime::kDebugBreak, 0);
- // Ignore the return value.
-#endif
-}
-
-
-void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FullCodeGenerator::VisitConditional(Conditional* expr) {
- Comment cmnt(masm_, "[ Conditional");
- Label true_case, false_case, done;
- VisitForControl(expr->condition(), &true_case, &false_case);
-
- __ bind(&true_case);
- Visit(expr->then_expression());
- // If control flow falls through Visit, jump to done.
- if (context_ == Expression::kEffect || context_ == Expression::kValue) {
- __ jmp(&done);
- }
-
- __ bind(&false_case);
- Visit(expr->else_expression());
- // If control flow falls through Visit, merge it with true case here.
- if (context_ == Expression::kEffect || context_ == Expression::kValue) {
- __ bind(&done);
- }
-}
-
-
-void FullCodeGenerator::VisitSlot(Slot* expr) {
- // Slots do not appear directly in the AST.
- UNREACHABLE();
-}
-
-
-void FullCodeGenerator::VisitLiteral(Literal* expr) {
- Comment cmnt(masm_, "[ Literal");
- Apply(context_, expr);
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() != Token::INIT_CONST);
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
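- // For example, 'x = v' is VARIABLE, 'o.x = v' is NAMED_PROPERTY, and
- // 'o[i] = v' is KEYED_PROPERTY.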
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- VisitForValue(prop->obj(), kStack);
- break;
- case KEYED_PROPERTY:
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
- break;
- }
-
- // If we have a compound assignment, get the value of the LHS
- // expression and store it on top of the stack.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kStack;
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
- break;
- }
- location_ = saved_location;
- }
-
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
-
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
- location_ = saved_location;
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- context_);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- VisitForValue(expr->key(), kStack);
- VisitForValue(expr->value(), kStack);
- // Create catch extension object.
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- Apply(context_, result_register());
-}
-
-
-void FullCodeGenerator::VisitThrow(Throw* expr) {
- Comment cmnt(masm_, "[ Throw");
- VisitForValue(expr->exception(), kStack);
- __ CallRuntime(Runtime::kThrow, 1);
- // Never returns here.
-}
-
-
-int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- __ Call(finally_entry_);
- return 0;
-}
-
-
-int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- return 0;
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
diff --git a/src/handles.cc b/src/handles.cc
index 3156670..d551e21 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -681,18 +681,14 @@
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
// Compile the source information to a code object.
Handle<SharedFunctionInfo> shared(function->shared());
- bool result = CompileLazyShared(shared, flag, 0);
- LOG(FunctionCreateEvent(*function));
- return result;
+ return CompileLazyShared(shared, flag, 0);
}
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
// Compile the source information to a code object.
Handle<SharedFunctionInfo> shared(function->shared());
- bool result = CompileLazyShared(shared, flag, 1);
- LOG(FunctionCreateEvent(*function));
- return result;
+ return CompileLazyShared(shared, flag, 1);
}
OptimizedObjectForAddingMultipleProperties::
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 3cb65ee..b615055 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -625,7 +625,8 @@
ConstructorHeapProfile js_cons_profile;
RetainerHeapProfile js_retainer_profile;
HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
CollectStats(obj, info);
js_cons_profile.CollectStats(obj);
js_retainer_profile.CollectStats(obj);
diff --git a/src/heap.cc b/src/heap.cc
index 6be1daf..fba2e87 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -76,8 +76,8 @@
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-int Heap::max_semispace_size_ = 2*MB;
-int Heap::max_old_generation_size_ = 192*MB;
+int Heap::max_semispace_size_ = 512*KB;
+int Heap::max_old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
@@ -327,7 +327,7 @@
int Heap::SizeOfObjects() {
int total = 0;
AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ while (Space* space = spaces.next()) {
total += space->Size();
}
return total;
@@ -732,14 +732,13 @@
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(Heap::code_space());
- for (HeapObject* object = code_it.next();
- object != NULL; object = code_it.next())
+ while (code_it.has_next()) {
+ HeapObject* object = code_it.next();
object->Iterate(&v);
+ }
HeapObjectIterator data_it(Heap::old_data_space());
- for (HeapObject* object = data_it.next();
- object != NULL; object = data_it.next())
- object->Iterate(&v);
+ while (data_it.has_next()) data_it.next()->Iterate(&v);
}
#endif
@@ -805,8 +804,8 @@
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.next();
- cell != NULL; cell = cell_iterator.next()) {
+ while (cell_iterator.has_next()) {
+ HeapObject* cell = cell_iterator.next();
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
@@ -1014,15 +1013,13 @@
void Heap::RebuildRSets(PagedSpace* space) {
HeapObjectIterator it(space);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- Heap::UpdateRSet(obj);
+ while (it.has_next()) Heap::UpdateRSet(it.next());
}
void Heap::RebuildRSets(LargeObjectSpace* space) {
LargeObjectIterator it(space);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- Heap::UpdateRSet(obj);
+ while (it.has_next()) Heap::UpdateRSet(it.next());
}
@@ -1206,7 +1203,7 @@
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_bit_field2(0);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -3109,8 +3106,7 @@
if (!HasBeenSetup()) return;
Top::PrintStack();
AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Print();
+ while (Space* space = spaces.next()) space->Print();
}
@@ -3344,11 +3340,6 @@
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
- IterateWeakRoots(v, mode);
-}
-
-
-void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
if (mode != VISIT_ALL_IN_SCAVENGE) {
@@ -3403,20 +3394,6 @@
// Iterate over pointers being held by inactive threads.
ThreadManager::Iterate(v);
v->Synchronize("threadmanager");
-
- // Iterate over the pointers the Serialization/Deserialization code is
- // holding.
- // During garbage collection this keeps the partial snapshot cache alive.
- // During deserialization of the startup snapshot this creates the partial
- // snapshot cache and deserializes the objects it refers to. During
- // serialization this does nothing, since the partial snapshot cache is
- // empty. However the next thing we do is create the partial snapshot,
- // filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(v);
- // We don't do a v->Synchronize call here, because in debug mode that will
- // output a flag to the snapshot. However at this point the serializer and
- // deserializer are deliberately a little unsynchronized (see above) so the
- // checking of the sync flag in the snapshot would fail.
}
@@ -3567,8 +3544,7 @@
// Initialize map space.
map_space_ = new MapSpace(FLAG_use_big_map_space
? max_old_generation_size_
- : MapSpace::kMaxMapPageIndex * Page::kPageSize,
- FLAG_max_map_space_pages,
+ : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup(NULL, 0)) return false;
@@ -3671,8 +3647,7 @@
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->Shrink();
+ while (PagedSpace* space = spaces.next()) space->Shrink();
}
@@ -3681,8 +3656,7 @@
void Heap::Protect() {
if (HasBeenSetup()) {
AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Protect();
+ while (Space* space = spaces.next()) space->Protect();
}
}
@@ -3690,8 +3664,7 @@
void Heap::Unprotect() {
if (HasBeenSetup()) {
AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Unprotect();
+ while (Space* space = spaces.next()) space->Unprotect();
}
}
@@ -3863,25 +3836,34 @@
}
-HeapObject* HeapIterator::next() {
+bool HeapIterator::has_next() {
// No iterator means we are done.
- if (object_iterator_ == NULL) return NULL;
+ if (object_iterator_ == NULL) return false;
- if (HeapObject* obj = object_iterator_->next_object()) {
+ if (object_iterator_->has_next_object()) {
// If the current iterator has more objects we are fine.
- return obj;
+ return true;
} else {
// Go through the spaces looking for one that has objects.
while (space_iterator_->has_next()) {
object_iterator_ = space_iterator_->next();
- if (HeapObject* obj = object_iterator_->next_object()) {
- return obj;
+ if (object_iterator_->has_next_object()) {
+ return true;
}
}
}
// Done with the last space.
object_iterator_ = NULL;
- return NULL;
+ return false;
+}
+
+
+HeapObject* HeapIterator::next() {
+ if (has_next()) {
+ return object_iterator_->next_object();
+ } else {
+ return NULL;
+ }
}
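+
+
+// With this protocol a heap walk reads, for instance:
+//   HeapIterator it;
+//   while (it.has_next()) { HeapObject* obj = it.next(); ... }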
diff --git a/src/heap.h b/src/heap.h
index 0dd20c0..1f04444 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -690,8 +690,6 @@
static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all the other roots in the heap.
- static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
@@ -1292,6 +1290,7 @@
explicit HeapIterator();
virtual ~HeapIterator();
+ bool has_next();
HeapObject* next();
void reset();
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index dc017ae..2cf469a 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -860,24 +860,6 @@
}
-void Assembler::cmpb(const Operand& dst, Register src) {
- ASSERT(src.is_byte_register());
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x38);
- emit_operand(src, dst);
-}
-
-
-void Assembler::cmpb(Register dst, const Operand& src) {
- ASSERT(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x3A);
- emit_operand(dst, src);
-}
-
-
void Assembler::cmpw(const Operand& op, Immediate imm16) {
ASSERT(imm16.is_int16());
EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index da27fd0..d675ecf 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -559,8 +559,6 @@
void and_(const Operand& dst, const Immediate& x);
void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register src, const Operand& dst);
- void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 121e155..240f4da 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -639,22 +639,15 @@
return frame_->Pop();
}
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
+ cgen_->UnloadReference(this);
}
@@ -704,7 +697,6 @@
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
- ref->set_unloaded();
}
@@ -751,12 +743,6 @@
class FloatingPointHelper : public AllStatic {
public:
-
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
// operand in register number. Returns operand as floating point number
@@ -764,16 +750,9 @@
static void LoadFloatOperand(MacroAssembler* masm, Register number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
+ // operand_1 on TOS+1, operand_2 on TOS+2. Returns operands as
+ // floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -789,11 +768,7 @@
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+ static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
};
@@ -938,6 +913,31 @@
return;
}
+ // Set the flags based on the operation, type and loop nesting level.
+ GenericBinaryFlags flags;
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ flags = (loop_nesting() > 0)
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
+ break;
+
+ default:
+ // By default only inline the Smi check code for likely smis if this
+ // operation is part of a loop.
+ flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
+ break;
+ }
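+
+ // E.g. 'x & y' inside a loop gets NO_SMI_CODE_IN_STUB and takes the
+ // inline smi path below, while 'x + y' outside any loop keeps
+ // NO_GENERIC_BINARY_FLAGS and relies on the stub's own smi check.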
+
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -971,6 +971,7 @@
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+ bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -979,31 +980,33 @@
if (FoldConstantSmis(op, left_int, right_int)) return;
}
- Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ // Set flag so that we go straight to the slow case, with no smi code.
+ generate_no_smi_code = true;
} else if (right_is_smi) {
- answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
+ ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
+ return;
} else if (left_is_smi) {
- answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- }
+ ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
+ return;
}
- frame_->Push(&answer);
+
+ if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
+ LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ // If we know the arguments aren't smis, use the binary operation stub
+ // that does not check for the fast smi case.
+ if (generate_no_smi_code) {
+ flags = NO_SMI_CODE_IN_STUB;
+ }
+ GenericBinaryOpStub stub(op, overwrite_mode, flags);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+ }
}
@@ -1090,11 +1093,10 @@
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- Result answer;
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need eax as the quotient register, edx as the remainder
@@ -1216,7 +1218,7 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- answer = quotient;
+ frame_->Push("ient);
} else {
ASSERT(op == Token::MOD);
// Check for a negative zero result. If the result is zero, and
@@ -1232,10 +1234,9 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- answer = remainder;
+ frame_->Push(&remainder);
}
- ASSERT(answer.is_valid());
- return answer;
+ return;
}
// Special handling of shift operations because they use fixed
@@ -1256,7 +1257,7 @@
frame_->Spill(ecx);
// Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
+ Result answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -1320,8 +1321,8 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
+ frame_->Push(&answer);
+ return;
}
// Handle the other binary operations.
@@ -1330,7 +1331,7 @@
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- answer = allocator_->Allocate();
+ Result answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -1352,12 +1353,12 @@
__ mov(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ add(answer.reg(), Operand(right->reg()));
+ __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
deferred->Branch(overflow);
break;
case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg()));
+ __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
deferred->Branch(overflow);
break;
@@ -1405,8 +1406,7 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
+ frame_->Push(&answer);
}
@@ -1575,34 +1575,36 @@
}
-Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
// Consumes the argument "operand".
+
// TODO(199): Optimize some special cases of operations involving a
// smi literal (multiply by 2, shift by 0, etc.).
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ overwrite_mode);
} else {
- return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ overwrite_mode);
}
+ ASSERT(!operand->is_valid());
+ return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
- Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -1625,12 +1627,13 @@
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
+ Result answer; // Only allocate a new register if reversed.
if (reversed) {
// The reversed case is only hit when the right operand is not a
// constant.
@@ -1658,14 +1661,15 @@
deferred->Branch(not_zero);
deferred->BindExit();
operand->Unuse();
+ frame_->Push(&answer);
break;
}
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1685,21 +1689,21 @@
__ and_(operand->reg(), ~kSmiTagMask);
}
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- answer = allocator()->Allocate();
+ Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1720,6 +1724,7 @@
operand->Unuse();
__ SmiTag(answer.reg());
deferred->BindExit();
+ frame_->Push(&answer);
}
break;
@@ -1744,7 +1749,7 @@
}
operand->Unuse();
- answer = allocator()->Allocate();
+ Result answer = allocator()->Allocate();
DeferredInlineSmiOperationReversed* deferred =
new DeferredInlineSmiOperationReversed(op,
answer.reg(),
@@ -1760,6 +1765,7 @@
__ SmiTag(answer.reg());
deferred->BindExit();
+ frame_->Push(&answer);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1777,10 +1783,10 @@
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
} else {
// Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
+ Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1802,6 +1808,7 @@
deferred->Branch(overflow);
deferred->BindExit();
operand->Unuse();
+ frame_->Push(&answer);
}
}
break;
@@ -1840,7 +1847,7 @@
}
}
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
break;
}
@@ -1866,7 +1873,7 @@
__ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
}
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -1874,17 +1881,16 @@
default: {
Result constant_operand(value);
if (reversed) {
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(answer.is_valid());
- return answer;
+ ASSERT(!operand->is_valid());
}
@@ -2305,29 +2311,20 @@
}
-void CodeGenerator::CallApplyLazy(Expression* applicand,
+void CodeGenerator::CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- // Load applicand.apply onto the stack. This will usually
+ JumpTarget slow, done;
+
+ // Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
+ Reference ref(this, apply);
+ ref.GetValue();
+ ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -2337,11 +2334,6 @@
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -2349,151 +2341,143 @@
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
- probe.Unuse();
- __ j(not_equal, &slow);
- }
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ slow.Branch(not_equal);
+ }
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
+ if (try_lazy) {
+ JumpTarget build_args;
- // Check that the receiver really is a JavaScript object.
- __ mov(eax, Operand(esp, 0));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
+ // Get rid of the arguments object probe.
+ frame_->Drop();
+
+ // Before messing with the execution stack, we sync all
+ // elements. This is bound to happen anyway because we're
+ // about to call a function.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Check that the receiver really is a JavaScript object.
+ { frame_->PushElementAt(0);
+ Result receiver = frame_->Pop();
+ receiver.ToRegister();
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
- Immediate(apply_code));
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // ecx is a small non-negative integer, due to the test above.
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(eax);
- // Stack now has 1 element:
- // esp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we have jumped from where:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
+ __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
+ build_args.Branch(less);
}
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ // Verify that we're invoking Function.prototype.apply.
+ { frame_->PushElementAt(1);
+ Result apply = frame_->Pop();
+ apply.ToRegister();
+ __ test(apply.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ build_args.Branch(not_equal);
+ __ mov(tmp.reg(),
+ FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ Immediate(apply_code));
+ build_args.Branch(not_equal);
+ }
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // esp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
+ // Get the function receiver from the stack. Check that it
+ // really is a function.
+ __ mov(edi, Operand(esp, 2 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ build_args.Branch(not_equal);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ mov(eax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
+ __ mov(ecx, Operand(eax));
+ __ cmp(eax, kArgumentsLimit);
+ build_args.Branch(above);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ __ bind(&loop);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &invoke);
+ __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
+ __ dec(ecx);
+ __ jmp(&loop);
+
+ // Invoke the function. The virtual frame knows about the receiver
+ // so make sure to forget that explicitly.
+ __ bind(&invoke);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ frame_->Forget(1);
+ Result result = allocator()->Allocate(eax);
+ frame_->SetElementAt(0, &result);
+ done.Jump();
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // Function.prototype.apply.
+ build_args.Bind();
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->Push(&arguments_object);
+ slow.Bind();
+ }
+
+ // Flip the apply function and the function to call on the stack, so
+ // the function looks like the receiver of the apply call. This way,
+ // the generic Function.prototype.apply implementation can deal with
+ // the call like it usually does.
+ Result a2 = frame_->Pop();
+ Result a1 = frame_->Pop();
+ Result ap = frame_->Pop();
+ Result fn = frame_->Pop();
+ frame_->Push(&ap);
+ frame_->Push(&fn);
+ frame_->Push(&a1);
+ frame_->Push(&a2);
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ frame_->Push(&res);
+
+ // All done. Restore context register after call.
+ if (try_lazy) done.Bind();
frame_->RestoreContextRegister();
}
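
The hunk above restores the older CallApplyLazy argument-copying scheme: detect an arguments-adaptor frame by the sentinel smi in the caller frame's context slot, then push the adapted arguments from the highest index down, bailing out to build_args once the count exceeds kArgumentsLimit. A minimal C++ sketch of that copy loop, with a plain vector standing in for the stack slots (all names here are illustrative, not V8 API):

    #include <cstdint>
    #include <vector>

    // Sketch only: adaptor_slots stands in for the adaptor frame's argument
    // area, machine_stack for the real stack the assembly pushes onto, and
    // kArgumentsLimit mirrors the 1 KB guard in the generated code.
    static const uint32_t kArgumentsLimit = 1024;

    bool CopyAdaptedArguments(const std::vector<intptr_t>& adaptor_slots,
                              std::vector<intptr_t>* machine_stack) {
      uint32_t count = static_cast<uint32_t>(adaptor_slots.size());
      if (count > kArgumentsLimit) return false;  // take the build_args path
      // Like the assembly, walk from the last argument down to the first so
      // the arguments land on the stack in call order.
      for (uint32_t i = count; i > 0; i--) {
        machine_stack->push_back(adaptor_slots[i - 1]);
      }
      return true;  // fall through to the invoke label
    }
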
@@ -3533,13 +3517,17 @@
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2);
- } else {
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
+ }
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, ebx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ if (each.size() > 0) {
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // ie, now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
frame_->Drop();
}
}
@@ -3547,6 +3535,10 @@
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Drop();
+
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -4596,12 +4588,9 @@
void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target(), node->is_compound());
+ { Reference target(this, node->target());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -4623,27 +4612,12 @@
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
- if (node->ends_initialization_block()) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- if (target.type() == Reference::NAMED) {
- frame_->Dup();
- // Dup target receiver on stack.
- } else {
- ASSERT(target.type() == Reference::KEYED);
- Result temp = frame_->Pop();
- frame_->Dup();
- frame_->Push(&temp);
- }
- }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else { // Assignment is a compound assignment.
+ } else {
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -4669,7 +4643,6 @@
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -4681,20 +4654,17 @@
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::UNLOADED);
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment.
- // Swap the receiver and the value of the assignment expression.
- Result lhs = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&lhs);
- frame_->Push(&receiver);
+ // argument to the runtime call is the receiver, which is the
+ // first value pushed as part of the reference, which is below
+ // the lhs value.
+ frame_->PushElementAt(target.size());
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
- ASSERT(frame_->height() == original_height + 1);
}
@@ -4857,7 +4827,7 @@
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
+ CallApplyLazy(property,
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -4890,21 +4860,16 @@
// -------------------------------------------
// Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
- Reference ref(this, property);
- ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- Load(property->obj());
- Load(property->key());
- Result function = EmitKeyedLoad(false);
- frame_->Drop(); // Key.
- Result receiver = frame_->Pop();
- frame_->Push(&function);
- frame_->Push(&receiver);
+ // The reference's size is non-negative.
+ frame_->PushElementAt(ref.size());
}
// Call the function.
@@ -5218,26 +5183,6 @@
}
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- temp.Unuse();
- destination()->Split(not_zero);
-}
-
-
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -5815,9 +5760,7 @@
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- // A constant reference is not saved to, so a constant reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
+ { Reference target(this, node->expression());
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -6420,114 +6363,6 @@
}
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
- Comment cmnt(masm_, "[ Load from keyed Property");
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ mov(index.reg(), key.reg());
- __ SmiUntag(index.reg());
- __ cmp(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
-    // one is eax, then we can reuse that one because the value
- // coming from the deferred code will be in eax.
- Result value = index;
- __ mov(value.reg(), Operand(elements.reg(),
- index.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- frame_->Push(&receiver);
- frame_->Push(&key);
- return value;
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = frame_->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- return answer;
- }
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
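
The #undef/#define pair above retargets the instruction-emitting shorthand from the code generator's member assembler to the masm parameter that the stub and Reference code below receive explicitly. In normal (non-coverage) builds the macro is just an arrow dereference, roughly like this sketch:

    // Stand-in for the real v8 class, just to make the pattern concrete.
    struct MacroAssembler { void nop() {} };

    // Simplified: the real ACCESS_MASM also has a code-coverage variant.
    #define ACCESS_MASM(masm) masm->
    #define __ ACCESS_MASM(masm)

    void EmitNop(MacroAssembler* masm) {
      __ nop();  // expands to masm->nop();
    }
    #undef __
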
@@ -6640,21 +6475,121 @@
}
case KEYED: {
+ Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- Result value = cgen_->EmitKeyedLoad(is_global);
- cgen_->frame()->Push(&value);
+
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (cgen_->loop_nesting() > 0) {
+ Comment cmnt(masm, "[ Inlined load from keyed Property");
+
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = cgen_->allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = cgen_->allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ deferred->Branch(not_equal);
+
+ // Check that the key is a smi.
+ __ test(key.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ mov(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ mov(index.reg(), key.reg());
+ __ SmiUntag(index.reg());
+ __ cmp(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+        // one is eax, then we can reuse that one because the value
+ // coming from the deferred code will be in eax.
+ Result value = index;
+ __ mov(value.reg(), Operand(elements.reg(),
+ index.reg(),
+ times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+
+ } else {
+ Comment cmnt(masm, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
default:
UNREACHABLE();
}
-
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
}
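
The inlined keyed load moved above is a chain of guards that all defer to the slow case on failure: the receiver must not be a smi, its map must match the (later patched) expected map, the key must be a smi, the elements array must be a plain FixedArray, the untagged index must be in bounds, and the loaded slot must not be the hole. A rough C++ rendering of the same guard sequence, using hypothetical stand-in types rather than V8's object model:

    #include <cstdint>

    // Illustrative stand-ins only, not V8's types.
    struct Elements { uint32_t length; intptr_t* slots; bool is_fixed_array; };
    struct TaggedObject { bool is_smi; void* map; Elements* elements; };
    static const intptr_t kTheHole = -1;  // marks uninitialized slots here

    bool TryInlineKeyedLoad(TaggedObject* receiver, intptr_t key,
                            void* expected_map, intptr_t* out) {
      if (receiver->is_smi) return false;               // deferred->Branch(zero)
      if (receiver->map != expected_map) return false;  // patched map check
      if ((key & 1) != 0) return false;                 // key must be a smi (tag 0)
      Elements* elements = receiver->elements;
      if (!elements->is_fixed_array) return false;      // dictionary: slow path
      uint32_t index = static_cast<uint32_t>(key >> 1); // SmiUntag
      if (index >= elements->length) return false;      // bounds check
      intptr_t value = elements->slots[index];
      if (value == kTheHole) return false;              // hole: slow path
      *out = value;
      return true;
    }
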
@@ -6688,9 +6623,6 @@
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
}
@@ -6820,7 +6752,6 @@
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -7131,335 +7062,143 @@
}
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
+ // Perform fast-case smi code for the operation (eax <op> ebx) and
+ // leave result in register eax.
-  // 2. Prepare the smi check of both operands by or'ing them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
+ // Prepare the smi check of both operands by or'ing them together
+ // before checking against the smi mask.
+ __ mov(ecx, Operand(ebx));
+ __ or_(ecx, Operand(eax));
+
switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
+ case Token::ADD:
+ __ add(eax, Operand(ebx)); // add optimistically
+ __ j(overflow, slow, not_taken);
break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
case Token::SUB:
- case Token::MUL:
+ __ sub(eax, Operand(ebx)); // subtract optimistically
+ __ j(overflow, slow, not_taken);
+ break;
+
case Token::DIV:
case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
+ // Sign extend eax into edx:eax.
+ __ cdq();
+ // Check for 0 divisor.
+ __ test(ebx, Operand(ebx));
+ __ j(zero, slow, not_taken);
break;
default:
+ // Fall-through to smi check.
break;
}
- // 3. Perform the smi check of the operands.
- ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
-  __ j(not_zero, &not_smis, not_taken);
+ // Perform the actual smi check.
+ ASSERT(kSmiTag == 0); // adjust zero check if not the case
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, slow, not_taken);
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
-      // smi tagging. These two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
+ // Do nothing here.
break;
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
+ ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
+ __ SmiUntag(eax);
// Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+ __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
+ // Go slow on overflows.
+ __ j(overflow, slow, not_taken);
+ // Check for negative zero result.
+ __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
break;
case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
+ // Divide edx:eax by ebx.
+ __ idiv(ebx);
+ // Check for the corner case of dividing the most negative smi
+ // by -1. We cannot use the overflow flag, since it is not set
+ // by idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+ __ j(equal, slow);
+ // Check for negative zero result.
+ __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
// Check that the remainder is zero.
__ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
+ __ j(not_zero, slow);
// Tag the result and store it in register eax.
__ SmiTag(eax);
break;
case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
-      __ j(zero, &not_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
+ // Divide edx:eax by ebx.
+ __ idiv(ebx);
+ // Check for negative zero result.
+ __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y
// Move remainder to register eax.
- __ mov(eax, edx);
+ __ mov(eax, Operand(edx));
+ break;
+
+ case Token::BIT_OR:
+ __ or_(eax, Operand(ebx));
+ break;
+
+ case Token::BIT_AND:
+ __ and_(eax, Operand(ebx));
+ break;
+
+ case Token::BIT_XOR:
+ __ xor_(eax, Operand(ebx));
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ // Move the second operand into register ecx.
+ __ mov(ecx, Operand(ebx));
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ switch (op_) {
+ case Token::SAR:
+ __ sar_cl(eax);
+ // No checks of result necessary
+ break;
+ case Token::SHR:
+ __ shr_cl(eax);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+          // smi tagging. These two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ break;
+ case Token::SHL:
+ __ shl_cl(eax);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(sign, slow, not_taken);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Tag the result and store it in register eax.
+ __ SmiTag(eax);
break;
default:
UNREACHABLE();
- }
-
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- break;
- }
-
- default:
- break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(¬_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
break;
}
}
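
Both versions of GenerateSmiCode rely on the same smi representation: the 31-bit payload is shifted left one bit with a zero tag bit, so or-ing two words and testing kSmiTagMask checks both tags at once, and an optimistic add of two tagged values can be validated by the overflow flag alone. A compact sketch of that fast path for ADD (hypothetical helpers; the MUL case would additionally need the NegativeZeroTest shown above, since a smi cannot represent -0):

    #include <cstdint>

    // Illustrative only: ia32-style 31-bit smis, tag bit 0 in the LSB.
    static const int32_t kSmiTagMask = 1;

    inline bool BothSmis(int32_t a, int32_t b) {
      return ((a | b) & kSmiTagMask) == 0;  // the "or, then test" trick
    }

    // Optimistic smi ADD: tagged values add directly because the tag bits
    // are zero; signed overflow means the true result left the smi range.
    bool SmiAdd(int32_t left, int32_t right, int32_t* result) {
      if (!BothSmis(left, right)) return false;  // not_smis / slow path
      int32_t sum;
      if (__builtin_add_overflow(left, right, &sum)) return false;
      *result = sum;  // still a validly tagged smi
      return true;
    }
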
@@ -7474,20 +7213,30 @@
// case smi code is not generated by the caller. Generating it here will speed
// up common operations.
if (HasSmiCodeInStub()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- GenerateLoadArguments(masm);
+ Label slow;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ GenerateSmiCode(masm, &slow);
+ GenerateReturn(masm);
+ // Too bad. The fast case smi code didn't succeed.
+ __ bind(&slow);
}
+ // Make sure the arguments are in edx and eax.
+ GenerateLoadArguments(masm);
+
// Floating point case.
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
+ // eax: y
+ // edx: x
+
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+ FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7496,15 +7245,59 @@
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ mov(eax, Operand(edx));
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE: {
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ }
+ default: UNREACHABLE();
+ }
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ mov(eax, Operand(edx));
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
@@ -7512,13 +7305,8 @@
case Token::DIV: __ fdivp(1); break;
default: UNREACHABLE();
}
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
}
}
case Token::MOD: {
@@ -7531,8 +7319,12 @@
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+ Label non_smi_result, skip_allocation;
+ Label operand_conversion_failure;
+ FloatingPointHelper::LoadAsIntegers(
+ masm,
+ use_sse3_,
+ &operand_conversion_failure);
switch (op_) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7545,7 +7337,7 @@
if (op_ == Token::SHR) {
// Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
+ __ j(not_zero, &non_smi_result);
} else {
// Check if result fits in a smi.
__ cmp(eax, 0xc0000000);
@@ -7560,7 +7352,6 @@
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
- Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -7589,6 +7380,15 @@
}
GenerateReturn(masm);
}
+
+ // Go to runtime for non-number inputs.
+ __ bind(&operand_conversion_failure);
+ // SHR should return uint32 - go to runtime for non-smi/negative result.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_result);
+ }
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
default: UNREACHABLE(); break;
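
The 0xc0000000 tests scattered through the shift cases encode the smi range check. With 31-bit smis a signed result only fits if bits 31 and 30 agree, and an unsigned SHR result only fits if both are clear. A sketch of the two predicates (the signed form is exactly what cmp eax, 0xc0000000 followed by a sign-flag jump computes):

    #include <cstdint>

    // SHR result is unsigned: 0x80000000 would lose its high bit when
    // tagged, 0x40000000 would turn negative after the tag shift.
    inline bool UnsignedResultFitsInSmi(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }

    // SHL/SAR results are signed: v - 0xc0000000 (== v + 0x40000000 mod 2^32)
    // is non-negative exactly when bits 31 and 30 of v agree, i.e. when v
    // survives the tag shift unchanged.
    inline bool SignedResultFitsInSmi(int32_t v) {
      return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
    }
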
@@ -7598,9 +7398,9 @@
// result. If arguments was passed in registers now place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
- if (HasArgsInRegisters()) {
+ if (HasArgumentsInRegisters()) {
__ pop(ecx);
- if (HasArgsReversed()) {
+ if (HasArgumentsReversed()) {
__ push(eax);
__ push(edx);
} else {
@@ -7614,15 +7414,17 @@
// Test for string arguments before calling runtime.
Label not_strings, not_string1, string1;
Result answer;
- __ test(edx, Immediate(kSmiTagMask));
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &not_string1);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &not_string1);
- // First argument is a string, test second.
- __ test(eax, Immediate(kSmiTagMask));
+  // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &string1);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &string1);
// First and second argument are strings. Jump to the string add stub.
@@ -7631,25 +7433,17 @@
// Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(
- HasArgsReversed() ?
- Builtins::STRING_ADD_RIGHT :
- Builtins::STRING_ADD_LEFT,
- JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
- __ test(eax, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &not_strings);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &not_strings);
// Only second argument is a string.
- __ InvokeBuiltin(
- HasArgsReversed() ?
- Builtins::STRING_ADD_LEFT :
- Builtins::STRING_ADD_RIGHT,
- JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
@@ -7661,7 +7455,7 @@
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
+ break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
@@ -7692,57 +7486,9 @@
}
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
- if (!HasArgsInRegisters()) {
+ if (!HasArgumentsInRegisters()) {
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
}
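
The esp offsets here and in GenerateReturn below follow the usual cdecl picture: the return address sits at esp[0] and the operands above it, with the right operand pushed last. A sketch of the mapping (kPointerSize is 4 on ia32; names illustrative):

    #include <cstdint>

    static const int kPointerSize = 4;  // ia32

    // "esp" stands in for the stack pointer just after the call pushed the
    // return address; these mirror the two loads in GenerateLoadArguments.
    inline uint32_t RightOperand(const uint8_t* esp) {
      return *reinterpret_cast<const uint32_t*>(esp + 1 * kPointerSize);
    }
    inline uint32_t LeftOperand(const uint8_t* esp) {
      return *reinterpret_cast<const uint32_t*>(esp + 2 * kPointerSize);
    }

GenerateReturn's ret(2 * kPointerSize) then pops the return address and discards both stacked operands in a single instruction.
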
@@ -7752,7 +7498,7 @@
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
// If arguments are not passed in registers remove them from the stack before
// returning.
- if (!HasArgsInRegisters()) {
+ if (!HasArgumentsInRegisters()) {
__ ret(2 * kPointerSize); // Remove both operands
} else {
__ ret(0);
@@ -7768,7 +7514,6 @@
Register source,
bool use_sse3,
Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
@@ -7971,7 +7716,7 @@
}
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
+void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
@@ -8003,40 +7748,16 @@
}
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
-}
-
-
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
+ Register scratch) {
Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_1, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_2, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
@@ -8059,24 +7780,6 @@
}
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -8411,24 +8114,10 @@
// esp[12]: subject string
// esp[16]: JSRegExp object
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
+ Label runtime;
// Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(eax, Operand(esp, 4 * kPointerSize));
ASSERT_EQ(0, kSmiTag);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -8464,7 +8153,7 @@
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -8476,7 +8165,7 @@
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the third argument is a positive smi.
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
__ j(not_zero, &runtime);
// Check that it is not greater than the subject string length.
@@ -8487,7 +8176,7 @@
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
@@ -8505,74 +8194,38 @@
__ j(greater, &runtime);
// ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string, seq_two_byte_string, check_code;
- const int kStringRepresentationEncodingMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ mov(eax, Operand(esp, kSubjectOffset));
+  // Check the representation and encoding of the subject string (only
+  // flat ascii strings are supported).
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ and_(ebx, kStringRepresentationEncodingMask);
- // First check for sequential string.
- ASSERT_EQ(0, kStringTag);
- ASSERT_EQ(0, kSeqStringTag);
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string.
- __ mov(edx, ebx);
- __ and_(edx, kStringRepresentationMask);
- __ cmp(edx, kConsStringTag);
+ __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
__ j(not_equal, &runtime);
- __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Immediate(Handle<String>(Heap::empty_string())));
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ and_(ebx, kStringRepresentationEncodingMask);
- __ bind(&seq_string);
-  // eax: subject string (sequential, either ascii or two byte)
-  // ebx: subject string type & kStringRepresentationEncodingMask
+ // ecx: RegExp data (FixedArray)
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime, not_taken);
+
// ecx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
- // it has, the field contains a code object otherwise it contains the hole.
- __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
- __ j(equal, &seq_two_byte_string);
-#ifdef DEBUG
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ Check(equal, "Expected sequential ascii string");
-#endif
+  // it has, the field contains a code object; otherwise it contains the hole.
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(edi, Immediate(1)); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(edi, Immediate(0)); // Type is two byte.
-
- __ bind(&check_code);
-  // Check that the irregexp code has been generated for the actual string
-  // encoding. If it has, the field contains a code object; otherwise it
-  // contains the hole.
__ CmpObjectType(edx, CODE_TYPE, ebx);
__ j(not_equal, &runtime);
- // eax: subject string
- // edx: code
-  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ mov(ecx, Operand(esp, kJSRegExpOffset));
- __ SmiUntag(ebx); // Previous index from smi.
+ __ mov(eax, Operand(esp, 3 * kPointerSize)); // Subject string.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // Previous index.
+ __ mov(ecx, Operand(esp, 4 * kPointerSize)); // JSRegExp object.
+  __ SmiUntag(ebx);  // Previous index from smi.
// eax: subject string
// ebx: previous index
@@ -8580,40 +8233,37 @@
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Indicate that this is a direct call from JavaScript.
__ push(Immediate(1));
- // Argument 6: Start (high end) of backtracking stack memory area.
+ // Argument 7: Start (high end) of backtracking stack memory area.
__ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ push(ecx);
+ // Argument 6: At start of string?
+  __ xor_(Operand(ecx), ecx);  // setcc only operates on cl (lower byte of ecx).
+ __ test(ebx, Operand(ebx));
+ __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
+ __ push(ecx);
+
// Argument 5: static offsets vector buffer.
__ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label push_two_byte, push_rest;
- __ test(edi, Operand(edi));
- __ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &push_two_byte);
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
- __ push(ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
- __ push(ecx); // Argument 3.
- __ jmp(&push_rest);
+ // Argument 4: End of string data.
+ __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
+ __ add(ecx, Operand(eax));
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ push(ecx);
- __ bind(&push_two_byte);
- ASSERT(kShortSize == 2);
- __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
- __ push(ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ push(ecx); // Argument 3.
-
- __ bind(&push_rest);
+ // Argument 3: Start of string data.
+ __ mov(ecx, ebx);
+ __ add(ebx, Operand(eax)); // String is ASCII.
+ __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ push(ebx);
// Argument 2: Previous index.
- __ push(ebx);
+ __ push(ecx);
// Argument 1: Subject string.
__ push(eax);
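
Read bottom-up, the eight pushes above (the reverted version used seven; the extra one is the at-start flag) line up with a cdecl call whose prototype would look roughly like this. The signature is reconstructed from the pushes for illustration only; it is not the declaration in the V8 sources:

    #include <cstdint>

    // Hypothetical prototype; Argument 1 is pushed last, so it is lowest
    // on the stack, as cdecl expects.
    int NativeRegExpMatch(void* subject,            // Argument 1: subject string
                          int previous_index,       // Argument 2: previous index
                          const char* input_start,  // Argument 3: start of string data
                          const char* input_end,    // Argument 4: end of string data
                          int* offsets_vector,      // Argument 5: static offsets vector
                          int at_start,             // Argument 6: at start of string?
                          uint8_t* stack_high,      // Argument 7: top of backtrack stack
                          int direct_call);         // Argument 8: direct call from JS
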
@@ -8622,7 +8272,7 @@
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(Operand(edx));
// Remove arguments.
- __ add(Operand(esp), Immediate(7 * kPointerSize));
+ __ add(Operand(esp), Immediate(8 * kPointerSize));
// Check the result.
Label success;
@@ -8649,7 +8299,7 @@
// Load RegExp data.
__ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(eax, Operand(esp, 4 * kPointerSize));
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
@@ -8657,7 +8307,7 @@
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
// ebx: last_match_info backing store (FixedArray)
@@ -8667,11 +8317,11 @@
__ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
@@ -8685,7 +8335,7 @@
// ecx: offsets vector
// edx: number of capture registers
Label next_capture, done;
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // Read previous index.
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
@@ -8712,7 +8362,7 @@
__ bind(&done);
// Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
__ ret(4 * kPointerSize);
// Do the runtime call to execute the regexp.
@@ -8870,7 +8520,7 @@
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
- FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
+ FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
// Jump to builtin for NaN.
@@ -8932,7 +8582,30 @@
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(ebx, Operand(edx));
+ __ and_(ebx, Operand(eax));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &call_builtin);
+
+ // Load instance type for both objects.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ Label non_ascii_flat;
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiString =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ and_(ecx, kFlatAsciiString);
+ __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &call_builtin);
+ __ and_(ebx, kFlatAsciiString);
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &call_builtin);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9986,76 +9659,79 @@
Register scratch1,
Register scratch2,
Register scratch3) {
- Label result_not_equal;
- Label result_greater;
- Label compare_lengths;
- // Find minimum length.
- Label left_shorter;
+ Label compare_lengths, compare_lengths_1;
+
+  // Find minimum length. If either length is zero, just compare lengths.
__ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- __ test(min_length, Operand(min_length));
- __ j(zero, &compare_lengths);
-
- // Change index to run from -min_length to -1 by adding min_length
- // to string start. This means that loop ends when index reaches zero,
- // which doesn't need an additional compare.
- __ lea(left,
- FieldOperand(left,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ lea(right,
- FieldOperand(right,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ neg(min_length);
-
- Register index = min_length; // index = -min_length;
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ mov_b(scratch2, Operand(left, index, times_1, 0));
- __ cmpb(scratch2, Operand(right, index, times_1, 0));
- __ j(not_equal, &result_not_equal);
- __ add(Operand(index), Immediate(1));
- __ j(not_zero, &loop);
+ __ test(scratch1, Operand(scratch1));
+ __ j(zero, &compare_lengths_1);
+ __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
+ __ test(scratch2, Operand(scratch2));
+ __ j(zero, &compare_lengths_1);
+ __ cmp(scratch1, Operand(scratch2));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(greater, scratch1, Operand(scratch2));
+ } else {
+ Label l;
+ __ j(less, &l);
+ __ mov(scratch1, scratch2);
+ __ bind(&l);
}
- // Compare lengths - strings up to min-length are equal.
+ Label result_greater, result_less;
+ Label loop;
+ // Compare next character.
+ __ mov(scratch3, Immediate(-1)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ Label character_compare_done;
+ __ add(Operand(scratch3), Immediate(1));
+ __ mov_b(scratch2, Operand(left,
+ scratch3,
+ times_1,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ subb(scratch2, Operand(right,
+ scratch3,
+ times_1,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ j(not_equal, &character_compare_done);
+ __ sub(Operand(scratch1), Immediate(1));
+ __ j(not_zero, &loop);
+  // If the first min-length characters match, compare lengths; otherwise the
+  // last character comparison is the result.
+ __ bind(&character_compare_done);
+ __ j(equal, &compare_lengths);
+ __ j(less, &result_less);
+ __ jmp(&result_greater);
+
+ // Compare lengths.
+ Label result_not_equal;
__ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ bind(&compare_lengths_1);
+ __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
__ j(not_zero, &result_not_equal);
// Result is EQUAL.
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ xor_(eax, Operand(eax));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
-
__ bind(&result_not_equal);
__ j(greater, &result_greater);
// Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ bind(&result_less);
+ __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
}
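
As a reading aid for the stub above, here is a plain C++ sketch of the comparison the reverted code emits; the helper name and byte-wise walk are illustrative, not code from the tree.

static int CompareFlatAscii(const char* left, int left_len,
                            const char* right, int right_len) {
  // Walk both strings up to the shorter length; the first differing
  // byte decides the result (mirrors the subb / j(not_equal) loop above).
  int min_length = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_length; i++) {
    int diff = static_cast<unsigned char>(left[i]) -
               static_cast<unsigned char>(right[i]);
    if (diff != 0) return diff < 0 ? -1 : 1;  // LESS / GREATER
  }
  // All shared bytes match: the shorter string compares smaller.
  if (left_len == right_len) return 0;  // EQUAL
  return left_len < right_len ? -1 : 1;
}
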
@@ -10076,19 +9752,41 @@
__ j(not_equal, ¬_same);
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ xor_(eax, Operand(eax));
__ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
__ bind(¬_same);
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(ebx, Operand(edx));
+ __ and_(ebx, Operand(eax));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+
+ // Load instance type for both strings.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ Label non_ascii_flat;
+ __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+ const int kFlatAsciiString =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ and_(ebx, kFlatAsciiString);
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
// Compare flat ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+ __ bind(&non_ascii_flat);
+
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
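
The masking above encodes a single predicate: is this a sequential ASCII string? A minimal C++ sketch follows; the constant values are assumptions from memory of the era and may differ, the authoritative definitions live in src/objects.h.

// Sketch of the flat-ASCII instance-type test used above.
const int kIsNotStringMask = 0x80;          // assumed value
const int kStringTag = 0x00;                // assumed value
const int kStringRepresentationMask = 0x03; // assumed value
const int kSeqStringTag = 0x00;             // assumed value
const int kStringEncodingMask = 0x04;       // assumed value
const int kAsciiStringTag = 0x04;           // assumed value

static bool IsFlatAsciiString(int instance_type) {
  const int mask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  // String, sequential representation, ASCII encoding; everything else
  // falls through to the runtime call.
  return (instance_type & mask) ==
         (kStringTag | kSeqStringTag | kAsciiStringTag);
}
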
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a81a7d1..56cf978 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,70 +43,57 @@
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack to keep track of references to them.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
+ enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen, Expression* expression);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
+ ASSERT(type_ == ILLEGAL);
type_ = value;
}
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
// The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
+ int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
+ // the expression stack, and it is left in place with its value above it.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
+ // on the expression stack. The stored value is left in place (with the
+ // reference intact below it) to support chained assignments.
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
};
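
To make the restored protocol concrete, here is a self-contained sketch of how a visitor typically drives a Reference. Everything below is a stand-in built to compile on its own; only the method names mirror the class above.

#include <cstddef>

struct Expression {};
struct Assignment {
  Expression* target_;
  Expression* value_;
  Expression* target() { return target_; }
  Expression* value() { return value_; }
};
enum InitState { CONST_INIT, NOT_CONST_INIT };

struct CodeGenSketch {
  void Load(Expression*) {}  // stand-in: pushes the value on the frame

  struct Reference {         // stand-in mirroring the protocol above
    Reference(CodeGenSketch*, Expression*) {}
    bool is_illegal() const { return false; }
    void SetValue(InitState) {}  // store; value stays on top (see above)
  };

  void VisitAssignment(Assignment* node) {
    Reference target(this, node->target());  // reference goes on the frame
    if (target.is_illegal()) return;         // e.g. assigning to a literal
    Load(node->value());                     // value lands above the reference
    target.SetValue(NOT_CONST_INIT);         // supports chained assignment
  }
};
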
@@ -433,11 +420,6 @@
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
- // Load a property of an object, returning it in a Result.
- // The object and the property name are passed on the stack, and
- // not changed.
- Result EmitKeyedLoad(bool is_global);
-
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -462,20 +444,20 @@
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- Result ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ void ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- Result LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ void LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(AstNode* node,
Condition cc,
@@ -497,10 +479,10 @@
CallFunctionFlags flags,
int position);
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
+ // Use an optimized version of Function.prototype.apply that avoids
+ // allocating the arguments object and just copies the arguments
+ // from the stack.
+ void CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -535,7 +517,6 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -632,8 +613,8 @@
friend class JumpTarget;
friend class Reference;
friend class Result;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
+ friend class FastCodeGenerator;
+ friend class CodeGenSelector;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -670,11 +651,6 @@
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
- Result GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right);
-
private:
Token::Value op_;
OverwriteMode mode_;
@@ -721,11 +697,11 @@
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
+ return ((op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV))
+ && flags_ != NO_SMI_CODE_IN_STUB;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -734,8 +710,8 @@
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
+ bool HasArgumentsInRegisters() { return args_in_registers_; }
+ bool HasArgumentsReversed() { return args_reversed_; }
};
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index cb500d5..1fbaa3c 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -53,25 +53,23 @@
static ByteMnemonic two_operands_instr[] = {
{0x03, "add", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
{0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
{0x84, "test_b", REG_OPER_OP_ORDER},
{0x85, "test", REG_OPER_OP_ORDER},
+ {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER},
{0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
similarity index 90%
rename from src/ia32/full-codegen-ia32.cc
rename to src/ia32/fast-codegen-ia32.cc
index fbeec3f..f485d9e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "full-codegen.h"
+#include "fast-codegen.h"
#include "parser.h"
namespace v8 {
@@ -51,7 +51,7 @@
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
@@ -160,7 +160,7 @@
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FastCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -193,7 +193,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -236,7 +236,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -279,7 +279,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -320,7 +320,7 @@
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -361,7 +361,7 @@
}
-void FullCodeGenerator::DropAndApply(int count,
+void FastCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -413,7 +413,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context,
+void FastCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -478,7 +478,7 @@
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
+void FastCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
// desired), then the value is already duplicated on the stack.
@@ -612,7 +612,7 @@
}
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -631,13 +631,13 @@
}
-void FullCodeGenerator::Move(Register destination, Slot* source) {
+void FastCodeGenerator::Move(Register destination, Slot* source) {
MemOperand location = EmitSlotSearch(source, destination);
__ mov(destination, location);
}
-void FullCodeGenerator::Move(Slot* dst,
+void FastCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -653,7 +653,7 @@
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -751,7 +751,7 @@
}
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
__ push(Immediate(pairs));
@@ -761,7 +761,7 @@
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -779,21 +779,17 @@
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->slot();
- Property* property = var->AsProperty();
-
- if (var->is_global() && !var->is_this()) {
+ Expression* rewrite = var->rewrite();
+ if (rewrite == NULL) {
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
@@ -807,24 +803,34 @@
// (eg, push/pop elimination).
__ nop();
DropAndApply(1, context, eax);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- Comment cmnt(masm_, "Lookup slot");
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- Apply(context, eax);
-
- } else if (slot != NULL) {
- Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
- ? "Context slot"
- : "Stack slot");
+ } else if (rewrite->AsSlot() != NULL) {
+ Slot* slot = rewrite->AsSlot();
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
Apply(context, slot);
-
} else {
- Comment cmnt(masm_, "Rewritten parameter");
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
+ Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -856,7 +862,7 @@
}
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -883,7 +889,7 @@
}
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -952,7 +958,7 @@
}
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
@@ -1002,7 +1008,7 @@
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
@@ -1012,7 +1018,7 @@
}
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -1020,7 +1026,7 @@
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FastCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ push(result_register());
GenericBinaryOpStub stub(op,
@@ -1031,17 +1037,11 @@
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
- // Three main cases: global variables, lookup slots, and all other
- // types of slots. Left-hand-side parameters that rewrite to
- // explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- Slot* slot = var->slot();
if (var->is_global()) {
- ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
// ecx, and the global object on the stack.
@@ -1053,14 +1053,8 @@
// Overwrite the receiver on the stack with the result if needed.
DropAndApply(1, context, eax);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- __ push(result_register()); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- Apply(context, eax);
-
- } else if (slot != NULL) {
+ } else if (var->slot() != NULL) {
+ Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
@@ -1092,7 +1086,7 @@
}
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1127,7 +1121,7 @@
}
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1163,7 +1157,7 @@
}
-void FullCodeGenerator::VisitProperty(Property* expr) {
+void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1183,7 +1177,7 @@
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1204,7 +1198,7 @@
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1221,7 +1215,7 @@
}
-void FullCodeGenerator::VisitCall(Call* expr) {
+void FastCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1292,7 +1286,7 @@
}
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1327,7 +1321,7 @@
}
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1359,7 +1353,7 @@
}
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1463,26 +1457,13 @@
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForValue(expr->expression(), kAccumulator);
- Label no_conversion;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
- __ push(result_register());
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ bind(&no_conversion);
- Apply(context_, result_register());
- break;
- }
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1501,7 +1482,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kAccumulator;
+ location_ = kStack;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1517,15 +1498,11 @@
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
+ __ push(eax);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
- __ push(eax);
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1557,33 +1534,13 @@
}
}
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- if (loop_depth() > 0) {
- if (expr->op() == Token::INC) {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- } else {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- }
- }
// Call stub for +1/-1.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(1)));
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
- stub.GenerateCall(masm(), eax, Smi::FromInt(1));
- __ bind(&done);
+ __ CallStub(&stub);
// Store the value returned in eax.
switch (assign_type) {
@@ -1638,7 +1595,7 @@
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1673,7 +1630,7 @@
}
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1788,25 +1745,25 @@
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, eax);
}
-Register FullCodeGenerator::result_register() { return eax; }
+Register FastCodeGenerator::result_register() { return eax; }
-Register FullCodeGenerator::context_register() { return esi; }
+Register FastCodeGenerator::context_register() { return esi; }
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
}
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
}
@@ -1814,7 +1771,7 @@
// ----------------------------------------------------------------------------
// Non-local control flow support.
-void FullCodeGenerator::EnterFinallyBlock() {
+void FastCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ mov(edx, Operand(esp, 0));
@@ -1828,7 +1785,7 @@
}
-void FullCodeGenerator::ExitFinallyBlock() {
+void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore result register from stack.
__ pop(result_register());
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index ebc2cfa9..5658605 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -244,10 +244,11 @@
// Get the map of the receiver.
__ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- // Check bit field.
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
__ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(kSlowCaseBitFieldMask));
+ __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a16c103..d7c7d3a 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1454,36 +1454,6 @@
}
-void MacroAssembler::IncrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- IncrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DecrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- DecrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -1525,38 +1495,6 @@
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- mov(scratch1, Operand(object1));
- and_(scratch1, Operand(object2));
- test(scratch1, Immediate(kSmiTagMask));
- j(zero, failure);
-
- // Load instance type for both strings.
- mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
- mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
- movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- // Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- and_(scratch1, kFlatAsciiStringMask);
- and_(scratch2, kFlatAsciiStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
- j(not_equal, failure);
-}
-
-
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 3f000ee..ceecebf 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -392,8 +392,6 @@
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
- void IncrementCounter(Condition cc, StatsCounter* counter, int value);
- void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
@@ -415,17 +413,6 @@
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- // ---------------------------------------------------------------------------
- // String utilities.
-
- // Checks if both objects are sequential ASCII strings, and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label *on_not_flat_ascii_strings);
-
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index f6da693..4af59dd 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -59,6 +59,8 @@
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -72,8 +74,6 @@
* - backup of caller ebx
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
- * - Boolean at start (if 1, we are starting at the start of the string,
- * otherwise 0)
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -625,7 +625,6 @@
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
- __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -668,15 +667,6 @@
// Store this value in a local variable, for use when clearing
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
-
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ mov(ebx, Operand(ebp, kStartIndex));
- __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
- __ test(ebx, Operand(ebx));
- __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
- __ mov(Operand(ebp, kAtStart), ecx);
-
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index d9866b7..8e7a6a5 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -123,7 +123,8 @@
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kAtStart = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kAtStart + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
@@ -132,9 +133,8 @@
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
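
Since the revert moves at_start from a computed local back to a call argument, the parameter offsets above shift by one slot. A small sketch of how the chain composes, with an assumed base value purely for illustration:

// Each parameter occupies one pointer-sized stack slot above the previous.
const int kPointerSize = 4;                    // ia32
const int kRegisterOutput = 5 * kPointerSize;  // assumed base offset
const int kAtStart = kRegisterOutput + kPointerSize;
const int kStackHighEnd = kAtStart + kPointerSize;
const int kDirectCall = kStackHighEnd + kPointerSize;
// The chain guarantees consecutive, non-overlapping slots, so adding or
// removing a parameter shifts everything above it by kPointerSize.
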
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 94ef7bf..3ebd2e6 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -53,8 +53,8 @@
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/src/ic.h b/src/ic.h
index 8f0eb37..be7f956 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -295,13 +295,6 @@
static void ClearInlinedVersion(Address address);
private:
- // Bit mask to be tested against bit field for the cases when
- // generic stub should go into slow case.
- // Access check is necessary explicitly since generic stub does not perform
- // map checks.
- static const int kSlowCaseBitFieldMask =
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
static void Generate(MacroAssembler* masm, const ExternalReference& f);
// Update the inline cache.
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 505cf03..8af472d 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -4462,13 +4462,10 @@
while (i1 < n1 || i2 < n2) {
CharacterRange next_range;
int range_source;
- if (i2 == n2 ||
- (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
- // Next smallest element is in first set.
+ if (i2 == n2 || first_set->at(i1).from() < second_set->at(i2).from()) {
next_range = first_set->at(i1++);
range_source = kInsideFirst;
} else {
- // Next smallest element is in second set.
next_range = second_set->at(i2++);
range_source = kInsideSecond;
}
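
The hunk above removes a bounds guard from a two-pointer merge over sorted range lists. A compact sketch over plain ints shows the guarded form of the selection step; the guard matters once the first list is exhausted before the second.

#include <cstddef>
#include <vector>

// Two-pointer merge of sorted lists, sketched with ints instead of
// CharacterRanges. The i1 < first.size() guard keeps first[i1] from being
// read after the first list runs out, the case the removed check covered.
static std::vector<int> MergeSorted(const std::vector<int>& first,
                                    const std::vector<int>& second) {
  std::vector<int> out;
  size_t i1 = 0, i2 = 0;
  while (i1 < first.size() || i2 < second.size()) {
    if (i2 == second.size() ||
        (i1 < first.size() && first[i1] < second[i2])) {
      out.push_back(first[i1++]);   // next smallest element is in first
    } else {
      out.push_back(second[i2++]);  // next smallest element is in second
    }
  }
  return out;
}
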
diff --git a/src/list.h b/src/list.h
index d3c2767..aff63c3 100644
--- a/src/list.h
+++ b/src/list.h
@@ -68,8 +68,7 @@
// not safe to use after operations that can change the list's
// backing store (eg, Add).
inline T& operator[](int i) const {
- ASSERT(0 <= i);
- ASSERT(i < length_);
+ ASSERT(0 <= i && i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
diff --git a/src/log.cc b/src/log.cc
index 5de7429..98dd562 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -155,13 +155,6 @@
return;
}
- const Address functionAddr =
- sample->fp + JavaScriptFrameConstants::kFunctionOffset;
- if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
- functionAddr)) {
- sample->function = Memory::Address_at(functionAddr) - kHeapObjectTag;
- }
-
int i = 0;
const Address callback = Logger::current_state_ != NULL ?
Logger::current_state_->external_callback() : NULL;
@@ -169,8 +162,11 @@
sample->stack[i++] = callback;
}
- SafeStackTraceFrameIterator it(sample->fp, sample->sp,
- sample->sp, js_entry_sp);
+ SafeStackTraceFrameIterator it(
+ reinterpret_cast<Address>(sample->fp),
+ reinterpret_cast<Address>(sample->sp),
+ reinterpret_cast<Address>(sample->sp),
+ js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
it.Advance();
@@ -841,14 +837,36 @@
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- MoveEventInternal(CODE_MOVE_EVENT, from, to);
+ static Address prev_to_ = NULL;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
+ msg.AppendAddress(from);
+ msg.Append(',');
+ msg.AppendAddress(to, prev_to_);
+ prev_to_ = to;
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
#endif
}
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- DeleteEventInternal(CODE_DELETE_EVENT, from);
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
+ msg.AppendAddress(from);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
#endif
}
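
The inlined logging above leans on LogMessageBuilder::AppendAddress, which writes the first address absolutely and later ones as deltas against a remembered previous value. A standalone sketch of that encoding; the exact textual format here is an assumption, not taken from the tree.

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Delta-encoded address logging as used by AppendAddress(to, prev_to_)
// above: absolute on first use, signed delta afterwards.
static void AppendAddressSketch(char* buf, size_t n,
                                uintptr_t addr, uintptr_t prev) {
  if (prev == 0) {
    snprintf(buf, n, "0x%" PRIxPTR, addr);          // first event: absolute
  } else if (addr >= prev) {
    snprintf(buf, n, "+0x%" PRIxPTR, addr - prev);  // forward delta
  } else {
    snprintf(buf, n, "-0x%" PRIxPTR, prev - addr);  // backward delta
  }
}
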
@@ -870,78 +888,6 @@
}
-void Logger::FunctionCreateEvent(JSFunction* function) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static Address prev_code = NULL;
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
- msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
- msg.AppendAddress(function->address());
- msg.Append(',');
- msg.AppendAddress(function->code()->address(), prev_code);
- prev_code = function->code()->address();
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::FunctionMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
-#endif
-}
-
-
-void Logger::FunctionDeleteEvent(Address from) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
-#endif
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to) {
- static Address prev_to_ = NULL;
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
- msg.Append("%s,", log_events_[event]);
- msg.AppendAddress(from);
- msg.Append(',');
- msg.AppendAddress(to, prev_to_);
- prev_to_ = to;
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-#endif
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
- msg.Append("%s,", log_events_[event]);
- msg.AppendAddress(from);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-#endif
-
-
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log) return;
@@ -1123,17 +1069,13 @@
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return;
static Address prev_sp = NULL;
- static Address prev_function = NULL;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[TICK_EVENT]);
- Address prev_addr = sample->pc;
+ Address prev_addr = reinterpret_cast<Address>(sample->pc);
msg.AppendAddress(prev_addr);
msg.Append(',');
- msg.AppendAddress(sample->sp, prev_sp);
- prev_sp = sample->sp;
- msg.Append(',');
- msg.AppendAddress(sample->function, prev_function);
- prev_function = sample->function;
+ msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
+ prev_sp = reinterpret_cast<Address>(sample->sp);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@@ -1202,7 +1144,6 @@
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
- LogFunctionObjects();
LogAccessorCallbacks();
if (!FLAG_sliding_state_window) ticker_->Start();
}
@@ -1237,7 +1178,9 @@
AssertNoAllocation no_alloc;
int compiled_funcs_count = 0;
HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
if (!obj->IsSharedFunctionInfo()) continue;
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->is_compiled()
@@ -1347,22 +1290,12 @@
}
-void Logger::LogFunctionObjects() {
- AssertNoAllocation no_alloc;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsJSFunction()) continue;
- JSFunction* jsf = JSFunction::cast(obj);
- if (!jsf->is_compiled()) continue;
- LOG(FunctionCreateEvent(jsf));
- }
-}
-
-
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
if (!ai->name()->IsString()) continue;
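
The loops in this file (and in mark-compact.cc below) revert from the next()-returns-NULL idiom to has_next()/next(). Both walk the same objects; a self-contained sketch of the two styles over a stand-in iterator:

#include <cstddef>

// Stand-in iterator supporting both contracts: has_next()/next(), where
// next() also returns NULL once exhausted. Purely illustrative.
struct HeapObjectSketch { int payload; };

struct HeapIteratorSketch {
  HeapObjectSketch* objects;
  size_t count;
  size_t pos;
  bool has_next() const { return pos < count; }
  HeapObjectSketch* next() { return has_next() ? &objects[pos++] : NULL; }
};

static int CountObjects(HeapIteratorSketch it) {
  int n = 0;
  // Newer style (being reverted away from):
  //   for (HeapObjectSketch* obj = it.next(); obj != NULL; obj = it.next())
  // Reverted style, as restored above:
  while (it.has_next()) {
    it.next();
    n++;
  }
  return n;
}
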
diff --git a/src/log.h b/src/log.h
index 1f6e60e..e21df03 100644
--- a/src/log.h
+++ b/src/log.h
@@ -116,9 +116,6 @@
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
- V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
- V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
- V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \
V(SNAPSHOT_POSITION_EVENT, "snapshot-pos", "sp") \
V(TICK_EVENT, "tick", "t") \
V(REPEAT_META_EVENT, "repeat", "r") \
@@ -227,12 +224,6 @@
static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
static void CodeDeleteEvent(Address from);
- // Emits a function object create event.
- static void FunctionCreateEvent(JSFunction* function);
- // Emits a function move event.
- static void FunctionMoveEvent(Address from, Address to);
- // Emits a function delete event.
- static void FunctionDeleteEvent(Address from);
static void SnapshotPositionEvent(Address addr, int pos);
@@ -287,8 +278,6 @@
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
- // Logs all compiled JSFunction objects found in the heap.
- static void LogFunctionObjects();
// Logs all accessor callbacks found in the heap.
static void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
@@ -310,15 +299,6 @@
const char* name,
Address entry_point);
- // Internal configurable move event.
- static void MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to);
-
- // Internal configurable move event.
- static void DeleteEventInternal(LogEventsAndTags event,
- Address from);
-
// Emits aliases for compressed messages.
static void LogAliases();
diff --git a/src/macros.py b/src/macros.py
index c160b49..1e436a0 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -92,7 +92,6 @@
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1f2c37d..e284b42 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -129,8 +129,7 @@
#endif
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL; space = spaces.next()) {
+ while (PagedSpace* space = spaces.next()) {
space->PrepareForMarkCompact(compacting_collection_);
}
@@ -173,7 +172,7 @@
int old_gen_used = 0;
OldSpaces spaces;
- for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+ while (OldSpace* space = spaces.next()) {
old_gen_recoverable += space->Waste() + space->AvailableFree();
old_gen_used += space->Size();
}
@@ -476,8 +475,8 @@
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(Heap::map_space());
- for (HeapObject* next_object = iterator.next();
- next_object != NULL; next_object = iterator.next()) {
+ while (iterator.has_next()) {
+ Object* next_object = iterator.next();
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
@@ -510,7 +509,8 @@
// so that we don't waste effort pointlessly scanning for objects.
ASSERT(!marking_stack.is_full());
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
+ while (it->has_next()) {
+ HeapObject* object = it->next();
if (object->IsOverflowed()) {
object->ClearOverflow();
ASSERT(object->IsMarked());
@@ -793,9 +793,8 @@
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
- for (HeapObject* obj = map_iterator.next();
- obj != NULL; obj = map_iterator.next()) {
- Map* map = reinterpret_cast<Map*>(obj);
+ while (map_iterator.has_next()) {
+ Map* map = reinterpret_cast<Map*>(map_iterator.next());
if (!map->IsMarked() && map->IsByteArray()) continue;
ASSERT(SafeIsMap(map));
@@ -970,6 +969,12 @@
inline void IgnoreNonLiveObject(HeapObject* object) {}
+// A code deletion event is logged for non-live code objects.
+inline void LogNonLiveCodeObject(HeapObject* object) {
+ if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
+}
+
+
// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
@@ -1117,7 +1122,10 @@
is_previous_alive = true;
}
} else {
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ if (object->IsCode()) {
+ // Notify the logger that compiled code has been collected.
+ LOG(CodeDeleteEvent(Code::cast(object)->address()));
+ }
if (is_previous_alive) { // Transition from live to free.
free_start = current;
is_previous_alive = false;
@@ -1196,7 +1204,7 @@
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
- ReportDeleteIfNeeded>(
+ IgnoreNonLiveObject>(
Heap::old_pointer_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
@@ -1204,7 +1212,7 @@
Heap::old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
- ReportDeleteIfNeeded>(
+ LogNonLiveCodeObject>(
Heap::code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
@@ -1283,7 +1291,6 @@
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
- ASSERT(o != NULL);
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
@@ -1309,8 +1316,10 @@
void UpdateMapPointersInLargeObjectSpace() {
LargeObjectIterator it(Heap::lo_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdateMapPointersInObject(obj);
+ while (true) {
+ if (!it.has_next()) break;
+ UpdateMapPointersInObject(it.next());
+ }
}
void Finish() {
@@ -1353,8 +1362,8 @@
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
+ ASSERT(it->has_next());
HeapObject* next = it->next();
- ASSERT(next != NULL);
if (next == last)
return NULL;
ASSERT(!next->IsOverflowed());
@@ -1443,9 +1452,8 @@
if (!FLAG_enable_slow_asserts)
return;
- for (HeapObject* obj = map_to_evacuate_it_.next();
- obj != NULL; obj = map_to_evacuate_it_.next())
- ASSERT(FreeListNode::IsFreeListNode(obj));
+ while (map_to_evacuate_it_.has_next())
+ ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
}
#endif
};
@@ -1478,8 +1486,7 @@
map_compact.FinishMapSpace();
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL; space = spaces.next()) {
+ while (PagedSpace* space = spaces.next()) {
if (space == Heap::map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
@@ -1654,8 +1661,7 @@
// Large objects do not move, the map word can be updated directly.
LargeObjectIterator it(Heap::lo_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdatePointersInNewObject(obj);
+ while (it.has_next()) UpdatePointersInNewObject(it.next());
USE(live_maps);
USE(live_pointer_olds);
@@ -1819,8 +1825,7 @@
Page::set_rset_state(Page::IN_USE);
#endif
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->MCCommitRelocationInfo();
+ while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
}
@@ -1901,11 +1906,6 @@
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsJSFunction()) {
- LOG(FunctionMoveEvent(old_addr, new_addr));
- }
-
return obj_size;
}
@@ -1986,11 +1986,6 @@
}
#endif
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsJSFunction()) {
- LOG(FunctionMoveEvent(old_addr, new_addr));
- }
-
return obj_size;
}
@@ -2006,15 +2001,4 @@
Heap::RebuildRSets();
}
-
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (obj->IsCode()) {
- LOG(CodeDeleteEvent(obj->address()));
- } else if (obj->IsJSFunction()) {
- LOG(FunctionDeleteEvent(obj->address()));
- }
-#endif
-}
-
} } // namespace v8::internal
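
For the mark-compact changes above: the sweep is parameterized over a per-dead-object hook, which is how the revert swaps ReportDeleteIfNeeded out for LogNonLiveCodeObject in the code space and IgnoreNonLiveObject elsewhere. A reduced sketch of that shape; all names here are stand-ins.

// Sweep parameterized by a non-live-object hook, mirroring the template
// argument to EncodeForwardingAddressesInPagedSpace above.
struct ObjSketch { bool is_code; };

inline void IgnoreNonLive(ObjSketch*) {}
inline void LogNonLiveCode(ObjSketch* obj) {
  if (obj->is_code) { /* LOG(CodeDeleteEvent(...)) in the real code */ }
}

template <void (*NonLiveHook)(ObjSketch*)>
static void SweepSketch(ObjSketch* objs, int n, const bool* live) {
  for (int i = 0; i < n; i++) {
    if (!live[i]) NonLiveHook(&objs[i]);  // notify per dead object
  }
}

// Usage: SweepSketch<LogNonLiveCode>(...) for the code space,
//        SweepSketch<IgnoreNonLive>(...) for the data spaces.
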
diff --git a/src/mark-compact.h b/src/mark-compact.h
index ab572f6..02aedb3 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -115,9 +115,6 @@
static bool in_use() { return state_ > PREPARE_GC; }
#endif
- // Determine type of object and emit deletion log event.
- static void ReportDeleteIfNeeded(HeapObject* obj);
-
private:
#ifdef DEBUG
enum CollectorState {
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 6457ae7..10138d9 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -164,10 +164,10 @@
}
context.Dispose();
CppByteSink sink(argv[1]);
+ i::Serializer ser(&sink);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
i::Heap::CollectAllGarbage(true);
- i::StartupSerializer ser(&sink);
ser.Serialize();
return 0;
}
diff --git a/src/objects.cc b/src/objects.cc
index c76fc83..118c489 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2839,11 +2839,7 @@
if (result.IsReadOnly()) return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
- if (obj->IsFixedArray()) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
- SetNormalizedProperty(name, obj, details);
- return obj;
- }
+ if (obj->IsFixedArray()) return obj;
}
}
}
diff --git a/src/objects.h b/src/objects.h
index 8730f91..40be0df 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3649,8 +3649,6 @@
FixedArray::kHeaderSize + kTagIndex * kPointerSize;
static const int kDataAsciiCodeOffset =
FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
- static const int kDataUC16CodeOffset =
- FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
};
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index ff75776..353d165 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -95,24 +95,6 @@
}
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
@@ -573,17 +555,17 @@
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
- sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+ sample.pc = mcontext.mc_eip;
+ sample.sp = mcontext.mc_esp;
+ sample.fp = mcontext.mc_ebp;
#elif V8_HOST_ARCH_X64
- sample.pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+ sample.pc = mcontext.mc_rip;
+ sample.sp = mcontext.mc_rsp;
+ sample.fp = mcontext.mc_rbp;
#elif V8_HOST_ARCH_ARM
- sample.pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample.sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample.fp = reinterpret_cast<Address>(mcontext.mc_r11);
+ sample.pc = mcontext.mc_r15;
+ sample.sp = mcontext.mc_r13;
+ sample.fp = mcontext.mc_r11;
#endif
active_sampler_->SampleStack(&sample);
}
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 005b1de..bfcd8fb 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -159,24 +159,6 @@
}
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
@@ -725,23 +707,23 @@
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
- sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+ sample.pc = mcontext.gregs[REG_EIP];
+ sample.sp = mcontext.gregs[REG_ESP];
+ sample.fp = mcontext.gregs[REG_EBP];
#elif V8_HOST_ARCH_X64
- sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+ sample.pc = mcontext.gregs[REG_RIP];
+ sample.sp = mcontext.gregs[REG_RSP];
+ sample.fp = mcontext.gregs[REG_RBP];
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+ sample.pc = mcontext.gregs[R15];
+ sample.sp = mcontext.gregs[R13];
+ sample.fp = mcontext.gregs[R11];
#else
- sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
+ sample.pc = mcontext.arm_pc;
+ sample.sp = mcontext.arm_sp;
+ sample.fp = mcontext.arm_fp;
#endif
#endif
if (IsVmThread())
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index e379ae2..0d5be45 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -259,24 +259,6 @@
}
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
int OS::StackWalk(Vector<StackFrame> frames) {
// If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
if (backtrace == NULL)
@@ -577,9 +559,9 @@
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
- sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+ sample.pc = state.REGISTER_FIELD(ip);
+ sample.sp = state.REGISTER_FIELD(sp);
+ sample.fp = state.REGISTER_FIELD(bp);
sampler_->SampleStack(&sample);
}
thread_resume(profiled_thread_);
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 62e6004..6d27304 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -94,24 +94,6 @@
}
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 89f4d98..41e0e64 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -99,6 +99,15 @@
}
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
double OS::DaylightSavingsOffset(double time) {
if (isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -108,6 +117,15 @@
}
+double OS::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
// ----------------------------------------------------------------------------
// POSIX stdio support.
//
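The consolidated LocalTimeOffset above relies on the BSD tm_gmtoff extension (available on glibc and the BSDs), subtracting the daylight-saving component to recover the standard-time offset. A minimal standalone sketch of the same calculation:

    #include <stdio.h>
    #include <time.h>

    static const int msPerSecond = 1000;

    // Standard-time offset from UTC in milliseconds: tm_gmtoff includes any
    // daylight-saving shift, so the DST hour is subtracted back out.
    double LocalTimeOffsetMs() {
      time_t tv = time(NULL);
      struct tm* t = localtime(&tv);
      return static_cast<double>(t->tm_gmtoff * msPerSecond -
                                 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
    }

    int main() {
      printf("offset: %.0f ms\n", LocalTimeOffsetMs());
      return 0;
    }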
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
deleted file mode 100644
index 85c2c54..0000000
--- a/src/platform-solaris.cc
+++ /dev/null
@@ -1,607 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Solaris 10 goes here. For the POSIX compatible
-// parts the implementation is in platform-posix.cc.
-
-#ifdef __sparc
-# error "V8 does not support the SPARC CPU architecture."
-#endif
-
-#include <sys/stack.h> // for stack alignment
-#include <unistd.h> // getpagesize(), usleep()
-#include <sys/mman.h> // mmap()
-#include <execinfo.h> // backtrace(), backtrace_symbols()
-#include <pthread.h>
-#include <sched.h> // for sched_yield
-#include <semaphore.h>
-#include <time.h>
-#include <sys/time.h> // gettimeofday(), timeradd()
-#include <errno.h>
-#include <ieeefp.h> // finite()
-#include <signal.h> // sigemptyset(), etc
-
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// 0 is never a valid thread id on Solaris since the main thread is 1 and
-// subsequent threads have their ids incremented from there.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Solaris runs on a lot of things.
-}
-
-
-int OS::ActivationFrameAlignment() {
- return STACK_ALIGN;
-}
-
-
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Solaris.
-}
-
-
-double OS::LocalTimeOffset() {
- // On Solaris, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- return static_cast<double>((mktime(loc) - utc) * msPerSecond);
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(getpagesize());
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
- *allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
-void OS::Sleep(int milliseconds) {
- useconds_t ms = static_cast<useconds_t>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- void** addresses = NewArray<void*>(frames_size);
-
- int frames_count = backtrace(addresses, frames_size);
-
- char** symbols;
- symbols = backtrace_symbols(addresses, frames_count);
- if (symbols == NULL) {
- DeleteArray(addresses);
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- DeleteArray(addresses);
- free(symbols);
-
- return frames_count;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, size);
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
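The VirtualMemory implementation being deleted here uses the classic two-step mmap pattern: reserve address space with PROT_NONE and MAP_NORESERVE, then commit ranges in place with MAP_FIXED. A minimal sketch of that pattern, assuming a POSIX system with MAP_ANON:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main() {
      // Reserve 1 MB of address space with no accessible backing store.
      const size_t reserved = 1 << 20;
      void* base = mmap(NULL, reserved, PROT_NONE,
                        MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) return 1;

      // Commit the first page in place: remap it read/write with MAP_FIXED.
      const size_t committed = 4096;
      if (mmap(base, committed, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
        return 1;
      }

      static_cast<char*>(base)[0] = 42;  // the committed page is now usable
      munmap(base, reserved);            // release the whole reservation
      printf("reserve/commit ok\n");
      return 0;
    }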
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
-}
-
-
-Thread::~Thread() {
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
- thread->Run();
- return NULL;
-}
-
-
-void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
-}
-
-
-void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class SolarisMutex : public Mutex {
- public:
-
- SolarisMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
-
- int Lock() { return pthread_mutex_lock(&mutex_); }
-
- int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new SolarisMutex();
-}
-
-
-class SolarisSemaphore : public Semaphore {
- public:
- explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void SolarisSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-#ifndef timeradd
-#define timeradd(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
- if ((result)->tv_usec >= 1000000) { \
- ++(result)->tv_sec; \
- (result)->tv_usec -= 1000000; \
- } \
- } while (0)
-#endif
-
-
-bool SolarisSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
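The loop above encodes an easy-to-miss detail: sem_timedwait() fails with EINTR when a signal interrupts the wait, and that must be retried rather than treated as a timeout. A compressed sketch of the same retry logic, using clock_gettime() in place of the gettimeofday()/TIMEVAL_TO_TIMESPEC conversion:

    #include <errno.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    // Wait up to timeout_secs for the semaphore; retry on EINTR.
    bool TimedWait(sem_t* sem, int timeout_secs) {
      struct timespec ts;
      clock_gettime(CLOCK_REALTIME, &ts);  // sem_timedwait takes an absolute deadline
      ts.tv_sec += timeout_secs;
      while (true) {
        if (sem_timedwait(sem, &ts) == 0) return true;  // got the semaphore
        if (errno == ETIMEDOUT) return false;           // genuine timeout
        if (errno != EINTR) return false;               // unexpected failure
        // EINTR: a signal caused a spurious wakeup -- retry, same deadline.
      }
    }

    int main() {
      sem_t sem;
      sem_init(&sem, 0, 1);
      printf("%s\n", TimedWait(&sem, 1) ? "acquired" : "timed out");
      sem_destroy(&sem);
      return 0;
    }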
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new SolarisSemaphore(count);
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
- sample.pc = 0;
- sample.sp = 0;
- sample.fp = 0;
-
- // We always sample the VM state.
- sample.state = Logger::state();
-
- active_sampler_->Tick(&sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- signal_handler_installed_ = false;
- }
-
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
-};
-
-
-Sampler::Sampler(int interval, bool profiling)
- : interval_(interval), profiling_(profiling), active_(false) {
- data_ = new PlatformData();
-}
-
-
-Sampler::~Sampler() {
- delete data_;
-}
-
-
-void Sampler::Start() {
- // There can only be one active sampler at a time on POSIX
- // platforms.
- if (active_sampler_ != NULL) return;
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
- active_ = true;
-}
-
-
-void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
- active_ = false;
-}
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-} } // namespace v8::internal
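Beyond its Solaris specifics, the deleted sampler shows the portable SIGPROF/setitimer recipe shared by the other POSIX ports: install a handler, arm ITIMER_PROF for periodic CPU-time ticks, and undo both on Stop(). A minimal self-contained sketch:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>

    static volatile sig_atomic_t ticks = 0;

    static void Tick(int) { ticks = ticks + 1; }

    int main() {
      const int interval_ms = 10;

      // Install the SIGPROF handler, saving the previous disposition.
      struct sigaction sa, old_sa;
      sa.sa_handler = Tick;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = 0;
      sigaction(SIGPROF, &sa, &old_sa);

      // Arm ITIMER_PROF: one tick per interval of consumed CPU time.
      struct itimerval timer, old_timer;
      timer.it_interval.tv_sec = interval_ms / 1000;
      timer.it_interval.tv_usec = (interval_ms % 1000) * 1000;
      timer.it_value = timer.it_interval;
      setitimer(ITIMER_PROF, &timer, &old_timer);

      // Burn CPU so the profiling timer actually fires.
      for (volatile long i = 0; i < 100000000L; i++) {}

      // "Stop": restore the saved timer and signal handler.
      setitimer(ITIMER_PROF, &old_timer, NULL);
      sigaction(SIGPROF, &old_sa, NULL);

      printf("profiler ticks observed: %d\n", static_cast<int>(ticks));
      return 0;
    }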
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 81b0d4c..1be4b77 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1813,13 +1813,13 @@
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
- sample.pc = reinterpret_cast<Address>(context.Rip);
- sample.sp = reinterpret_cast<Address>(context.Rsp);
- sample.fp = reinterpret_cast<Address>(context.Rbp);
+ sample.pc = context.Rip;
+ sample.sp = context.Rsp;
+ sample.fp = context.Rbp;
#else
- sample.pc = reinterpret_cast<Address>(context.Eip);
- sample.sp = reinterpret_cast<Address>(context.Esp);
- sample.fp = reinterpret_cast<Address>(context.Ebp);
+ sample.pc = context.Eip;
+ sample.sp = context.Esp;
+ sample.fp = context.Ebp;
#endif
sampler_->SampleStack(&sample);
}
diff --git a/src/platform.h b/src/platform.h
index bc2e9d6..75e557c 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -44,12 +44,6 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
-#ifdef __sun
-// On Solaris, to get isinf, INFINITY, fpclassify and other macros one needs
-// to define this symbol
-#define __C99FEATURES__ 1
-#endif
-
#define V8_INFINITY INFINITY
// Windows specific stuff.
@@ -512,18 +506,11 @@
// TickSample captures the information collected for each sample.
class TickSample {
public:
- TickSample()
- : pc(NULL),
- sp(NULL),
- fp(NULL),
- function(NULL),
- state(OTHER),
- frames_count(0) {}
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- Address function; // The last called JS function.
- StateTag state; // The state of the VM.
+ TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
+ uintptr_t pc; // Instruction pointer.
+ uintptr_t sp; // Stack pointer.
+ uintptr_t fp; // Frame pointer.
+ StateTag state; // The state of the VM.
static const int kMaxFramesCount = 100;
EmbeddedVector<Address, kMaxFramesCount> stack; // Call stack.
int frames_count; // Number of captured frames.
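The TickSample change reverted here swaps Address (a byte pointer) fields for uintptr_t, so the register words captured in the platform samplers can be stored without reinterpret_cast. A small sketch of the reverted-to shape, trimmed to the register fields:

    #include <stdint.h>
    #include <stdio.h>

    // Plain machine words, zero-initialized; no pointer casts required.
    struct TickSample {
      TickSample() : pc(0), sp(0), fp(0) {}
      uintptr_t pc;  // instruction pointer
      uintptr_t sp;  // stack pointer
      uintptr_t fp;  // frame pointer
    };

    int main() {
      TickSample sample;
      unsigned long long reg_rip = 0x401000;  // a register word from the OS
      sample.pc = static_cast<uintptr_t>(reg_rip);  // plain integral cast
      printf("pc=%lx\n", static_cast<unsigned long>(sample.pc));
      return 0;
    }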
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 0fcfc33..3685fcd 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -122,10 +122,7 @@
bool is_ascii = subject->IsAsciiRepresentation();
- // The string has been flattened, so if it is a cons string it contains the
- // full string in the first part.
if (StringShape(subject_ptr).IsCons()) {
- ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
subject_ptr = ConsString::cast(subject_ptr)->first();
}
// Ensure that an underlying string has the same ascii-ness.
@@ -144,7 +141,8 @@
start_offset,
input_start,
input_end,
- offsets_vector);
+ offsets_vector,
+ previous_index == 0);
return res;
}
@@ -155,11 +153,14 @@
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output) {
+ int* output,
+ bool at_start) {
typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
+ const byte*, int*, int, Address, int);
matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
+ int at_start_val = at_start ? 1 : 0;
+
// Ensure that the minimum stack has been allocated.
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
@@ -171,6 +172,7 @@
input_start,
input_end,
output,
+ at_start_val,
stack_base,
direct_call);
ASSERT(result <= SUCCESS);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 105d8cc..2e619bd 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -218,7 +218,8 @@
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output);
+ int* output,
+ bool at_start);
};
#endif // V8_NATIVE_REGEXP
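The hunks above thread a new at_start flag through to the generated matcher, widening the function-pointer signature by one int. A sketch of the calling-convention change, with a hypothetical StubMatcher standing in for the FUNCTION_CAST'ed generated code:

    #include <stdio.h>

    // The matcher gains an extra int telling it whether matching starts at
    // the beginning of the subject string ('^' anchors depend on this).
    typedef int (*Matcher)(const char* subject, int start_offset, int at_start);

    static int StubMatcher(const char* subject, int start_offset, int at_start) {
      printf("match '%s' from %d, at_start=%d\n", subject, start_offset, at_start);
      return at_start ? 1 : 0;  // stand-in result
    }

    int main() {
      Matcher matcher_func = &StubMatcher;  // FUNCTION_CAST in the real code
      int previous_index = 0;
      int at_start_val = (previous_index == 0) ? 1 : 0;
      int result = matcher_func("abc", previous_index, at_start_val);
      printf("result=%d\n", result);
      return 0;
    }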
diff --git a/src/runtime.cc b/src/runtime.cc
index 51c1ba2..b6da528 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4782,7 +4782,7 @@
return Code::cast(code);
}
- return shared->construct_stub();
+ return Builtins::builtin(Builtins::JSConstructStubGeneric);
}
@@ -4830,7 +4830,6 @@
CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
CLEAR_EXCEPTION,
0);
- LOG(FunctionCreateEvent(*function));
}
bool first_allocation = !function->has_initial_map();
@@ -7212,8 +7211,9 @@
Handle<SharedFunctionInfo> last;
while (!done) {
HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
if (obj->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
if (shared->script() == *script) {
@@ -7669,10 +7669,10 @@
int count = 0;
JSObject* last = NULL;
HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator.next()) != NULL) &&
+ while (iterator.has_next() &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
+ HeapObject* heap_obj = iterator.next();
if (heap_obj->IsJSObject()) {
// Skip context extension objects and argument arrays as these are
// checked in the context of functions using them.
@@ -7782,10 +7782,10 @@
// Iterate the heap.
int count = 0;
HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator.next()) != NULL) &&
+ while (iterator.has_next() &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
+ HeapObject* heap_obj = iterator.next();
if (heap_obj->IsJSObject()) {
JSObject* obj = JSObject::cast(heap_obj);
if (obj->map()->constructor() == constructor) {
@@ -7933,8 +7933,8 @@
// script data.
Handle<Script> script;
HeapIterator iterator;
- HeapObject* obj = NULL;
- while (script.is_null() && ((obj = iterator.next()) != NULL)) {
+ while (script.is_null() && iterator.has_next()) {
+ HeapObject* obj = iterator.next();
// If a script is found check if it has the script data requested.
if (obj->IsScript()) {
if (Script::cast(obj)->name()->IsString()) {
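The runtime.cc hunks all restore the same iteration idiom: an explicit has_next()/next() pair instead of a next() that signals exhaustion by returning NULL. A toy IntIterator sketch of the restored protocol:

    #include <stdio.h>

    // Stand-in for HeapIterator: next() is only valid while has_next().
    class IntIterator {
     public:
      IntIterator(const int* data, int count)
          : data_(data), count_(count), i_(0) {}
      bool has_next() const { return i_ < count_; }
      int next() { return data_[i_++]; }
     private:
      const int* data_;
      int count_;
      int i_;
    };

    int main() {
      const int heap[] = {1, 2, 3};
      IntIterator it(heap, 3);
      while (it.has_next()) {
        int obj = it.next();
        printf("%d\n", obj);
      }
      return 0;
    }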
diff --git a/src/runtime.js b/src/runtime.js
index c4c855e..ce2f197 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -541,9 +541,7 @@
if (IS_STRING(x)) return new $String(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
- if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
- throw %MakeTypeError('null_to_object', []);
- }
+ if (x == null) throw %MakeTypeError('null_to_object', []);
return x;
}
diff --git a/src/serialize.cc b/src/serialize.cc
index 6b85893..ec3a967 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -44,6 +44,67 @@
namespace v8 {
namespace internal {
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class SerializationAddressMapper {
+ public:
+ static bool IsMapped(HeapObject* obj) {
+ EnsureMapExists();
+ return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
+ }
+
+ static int MappedTo(HeapObject* obj) {
+ ASSERT(IsMapped(obj));
+ return static_cast<int>(reinterpret_cast<intptr_t>(
+ serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
+ }
+
+ static void Map(HeapObject* obj, int to) {
+ EnsureMapExists();
+ ASSERT(!IsMapped(obj));
+ HashMap::Entry* entry =
+ serialization_map_->Lookup(Key(obj), Hash(obj), true);
+ entry->value = Value(to);
+ }
+
+ static void Zap() {
+ if (serialization_map_ != NULL) {
+ delete serialization_map_;
+ }
+ serialization_map_ = NULL;
+ }
+
+ private:
+ static bool SerializationMatchFun(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t Hash(HeapObject* obj) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ }
+
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
+
+ static void* Value(int v) {
+ return reinterpret_cast<void*>(v);
+ }
+
+ static void EnsureMapExists() {
+ if (serialization_map_ == NULL) {
+ serialization_map_ = new HashMap(&SerializationMatchFun);
+ }
+ }
+
+ static HashMap* serialization_map_;
+};
+
+
+HashMap* SerializationAddressMapper::serialization_map_ = NULL;
+
+
+
// -----------------------------------------------------------------------------
// Coding of external references.
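SerializationAddressMapper is, in essence, a pointer-keyed map from heap objects to their post-deserialization offsets. A standalone analogue using std::unordered_map instead of V8's HashMap (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <unordered_map>

    // Objects are keyed by address and mapped to the offset they will
    // occupy after deserialization.
    class AddressMapper {
     public:
      bool IsMapped(const void* obj) const { return map_.count(Key(obj)) != 0; }
      int MappedTo(const void* obj) const { return map_.at(Key(obj)); }
      void Map(const void* obj, int to) { map_[Key(obj)] = to; }
     private:
      static uintptr_t Key(const void* obj) {
        return reinterpret_cast<uintptr_t>(obj);
      }
      std::unordered_map<uintptr_t, int> map_;
    };

    int main() {
      int some_object;
      AddressMapper mapper;
      mapper.Map(&some_object, 128);  // "serialized at offset 128"
      if (mapper.IsMapped(&some_object))
        printf("mapped to %d\n", mapper.MappedTo(&some_object));
      return 0;
    }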
@@ -586,13 +647,10 @@
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
- // Make sure the entire partial snapshot cache is traversed, filling it with
- // valid object pointers.
- partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
- Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
- Heap::IterateWeakRoots(this, VISIT_ALL);
+ Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ ASSERT(source_->AtEOF());
}
@@ -608,8 +666,7 @@
}
-Deserializer::~Deserializer() {
- ASSERT(source_->AtEOF());
+void Deserializer::TearDown() {
if (external_reference_decoder_ != NULL) {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
@@ -834,16 +891,6 @@
*current++ = Heap::roots_address()[root_id];
break;
}
- case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
- int cache_index = source_->GetInt();
- *current++ = partial_snapshot_cache_[cache_index];
- break;
- }
- case SYNCHRONIZE: {
- // If we get here then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- UNREACHABLE();
- }
default:
UNREACHABLE();
}
@@ -897,6 +944,7 @@
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(NULL),
+ partial_(false),
large_object_total_(0) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
@@ -904,7 +952,7 @@
}
-void StartupSerializer::SerializeStrongReferences() {
+void Serializer::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
@@ -918,30 +966,20 @@
CHECK_NE(v8::INSTALLED, ext->state());
}
external_reference_encoder_ = new ExternalReferenceEncoder();
- Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateRoots(this, VISIT_ONLY_STRONG);
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
+ SerializationAddressMapper::Zap();
}
-void PartialSerializer::Serialize(Object** object) {
+void Serializer::SerializePartial(Object** object) {
+ partial_ = true;
external_reference_encoder_ = new ExternalReferenceEncoder();
this->VisitPointer(object);
-
- // After we have done the partial serialization the partial snapshot cache
- // will contain some references needed to decode the partial snapshot. We
- // fill it up with undefineds so that it has a predictable length and the
- // deserialization code doesn't need to know the length.
- for (int index = partial_snapshot_cache_length_;
- index < kPartialSnapshotCacheCapacity;
- index++) {
- partial_snapshot_cache_[index] = Heap::undefined_value();
- startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
- }
- partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
-
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
+ SerializationAddressMapper::Zap();
}
@@ -960,54 +998,7 @@
}
-Object* SerializerDeserializer::partial_snapshot_cache_[
- kPartialSnapshotCacheCapacity];
-int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
-
-
-// This ensures that the partial snapshot cache keeps things alive during GC and
-// tracks their movement. When it is called during serialization of the startup
-// snapshot the partial snapshot is empty, so nothing happens. When the partial
-// (context) snapshot is created, this array is populated with the pointers that
-// the partial snapshot will need. As that happens we emit serialized objects to
-// the startup snapshot that correspond to the elements of this cache array. On
-// deserialization we therefore need to visit the cache array. This fills it up
-// with pointers to deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor *visitor) {
- visitor->VisitPointers(
- &partial_snapshot_cache_[0],
- &partial_snapshot_cache_[partial_snapshot_cache_length_]);
-}
-
-
-// When deserializing we need to set the size of the snapshot cache. This means
-// the root iteration code (above) will iterate over array elements, writing the
-// references to deserialized objects in them.
-void SerializerDeserializer::SetSnapshotCacheSize(int size) {
- partial_snapshot_cache_length_ = size;
-}
-
-
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- for (int i = 0; i < partial_snapshot_cache_length_; i++) {
- Object* entry = partial_snapshot_cache_[i];
- if (entry == heap_object) return i;
- }
- // We didn't find the object in the cache. So we add it to the cache and
- // then visit the pointer so that it becomes part of the startup snapshot
- // and we can refer to it from the partial snapshot.
- int length = partial_snapshot_cache_length_;
- CHECK(length < kPartialSnapshotCacheCapacity);
- partial_snapshot_cache_[length] = heap_object;
- startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
- // We don't recurse from the startup snapshot generator into the partial
- // snapshot generator.
- ASSERT(length == partial_snapshot_cache_length_);
- return partial_snapshot_cache_length_++;
-}
-
-
-int PartialSerializer::RootIndex(HeapObject* heap_object) {
+int Serializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
Object* root = Heap::roots_address()[i];
if (root == heap_object) return i;
@@ -1016,136 +1007,67 @@
}
-// Encode the location of an already deserialized object in order to write its
-// location into a later object. We can encode the location as an offset from
-// the start of the deserialized objects or as an offset backwards from the
-// current allocation pointer.
-void Serializer::SerializeReferenceToPreviousObject(
- int space,
- int address,
+void Serializer::SerializeObject(
+ Object* o,
ReferenceRepresentation reference_representation) {
- int offset = CurrentAllocationAddress(space) - address;
- bool from_start = true;
- if (SpaceIsPaged(space)) {
- // For paged space it is simple to encode back from current allocation if
- // the object is on the same page as the current allocation pointer.
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
- from_start = false;
- address = offset;
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (partial_) {
+ int root_index = RootIndex(heap_object);
+ if (root_index != kInvalidRootIndex) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ return;
}
- } else if (space == NEW_SPACE) {
- // For new space it is always simple to encode back from current allocation.
- if (offset < address) {
- from_start = false;
- address = offset;
- }
+ // All the symbols that the snapshot needs should be in the root table.
+ ASSERT(!heap_object->IsSymbol());
}
- // If we are actually dealing with real offsets (and not a numbering of
- // all objects) then we should shift out the bits that are always 0.
- if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- // On some architectures references between code objects are encoded
- // specially (as relative offsets). Such references have their own
- // special tags to simplify the deserializer.
- if (reference_representation == CODE_TARGET_REPRESENTATION) {
- if (from_start) {
- sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
- sink_->PutInt(address, "address");
- } else {
- sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
- sink_->PutInt(address, "address");
+ if (SerializationAddressMapper::IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = SerializationAddressMapper::MappedTo(heap_object);
+ int offset = CurrentAllocationAddress(space) - address;
+ bool from_start = true;
+ if (SpaceIsPaged(space)) {
+ if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+ (address >> kPageSizeBits)) {
+ from_start = false;
+ address = offset;
+ }
+ } else if (space == NEW_SPACE) {
+ if (offset < address) {
+ from_start = false;
+ address = offset;
+ }
}
- } else {
- // Regular absolute references.
- CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
- if (from_start) {
- // There are some common offsets that have their own specialized encoding.
-#define COMMON_REFS_CASE(tag, common_space, common_offset) \
- if (space == common_space && address == common_offset) { \
- sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
+ // If we are actually dealing with real offsets (and not a numbering of
+ // all objects) then we should shift out the bits that are always 0.
+ if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+ if (reference_representation == CODE_TARGET_REPRESENTATION) {
+ if (from_start) {
+ sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+ sink_->PutInt(address, "address");
+ } else {
+ sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
sink_->PutInt(address, "address");
}
} else {
- sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
- sink_->PutInt(address, "address");
+ CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+ if (from_start) {
+#define COMMON_REFS_CASE(tag, common_space, common_offset) \
+ if (space == common_space && address == common_offset) { \
+ sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+ { /* NOLINT */
+ sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
+ sink_->PutInt(address, "address");
+ }
+ } else {
+ sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ sink_->PutInt(address, "address");
+ }
}
- }
-}
-
-
-void StartupSerializer::SerializeObject(
- Object* o,
- ReferenceRepresentation reference_representation) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
-
- if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- reference_representation);
- } else {
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer object_serializer(this,
- heap_object,
- sink_,
- reference_representation);
- object_serializer.Serialize();
- }
-}
-
-
-void StartupSerializer::SerializeWeakReferences() {
- for (int i = partial_snapshot_cache_length_;
- i < kPartialSnapshotCacheCapacity;
- i++) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
- sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
- }
- Heap::IterateWeakRoots(this, VISIT_ALL);
-}
-
-
-void PartialSerializer::SerializeObject(
- Object* o,
- ReferenceRepresentation reference_representation) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
-
- int root_index;
- if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- return;
- }
-
- if (ShouldBeInThePartialSnapshotCache(heap_object)) {
- int cache_index = PartialSnapshotCacheIndex(heap_object);
- sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
- sink_->PutInt(cache_index, "partial_snapshot_cache_index");
- return;
- }
-
- // Pointers from the partial snapshot to the objects in the startup snapshot
- // should go through the root array or through the partial snapshot cache.
- // If this is not the case you may have to add something to the root array.
- ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
- // All the symbols that the partial snapshot needs should be either in the
- // root table or in the partial snapshot cache.
- ASSERT(!heap_object->IsSymbol());
-
- if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- reference_representation);
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
@@ -1157,6 +1079,7 @@
}
+
void Serializer::ObjectSerializer::Serialize() {
int space = Serializer::SpaceOfObject(object_);
int size = object_->Size();
@@ -1173,8 +1096,9 @@
// Mark this object as already serialized.
bool start_new_page;
- int offset = serializer_->Allocate(space, size, &start_new_page);
- serializer_->address_mapper()->AddMapping(object_, offset);
+ SerializationAddressMapper::Map(
+ object_,
+ serializer_->Allocate(space, size, &start_new_page));
if (start_new_page) {
sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
sink_->PutSection(space, "NewPageSpace");
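The inlined back-reference logic in SerializeObject encodes a previously serialized object either as an offset from the start of its space or, when it shares a page with the current allocation point, as a short backwards offset, then shifts out alignment bits that are always zero. A sketch with assumed page-size and alignment constants:

    #include <stdio.h>

    const int kPageSizeBits = 13;        // assumed 8 KB pages
    const int kObjectAlignmentBits = 2;  // assumed 4-byte object alignment

    // Encode a reference to an object already serialized in a paged space.
    void EncodeReference(int current_alloc, int address) {
      bool from_start = true;
      if ((current_alloc >> kPageSizeBits) == (address >> kPageSizeBits)) {
        from_start = false;                 // same page: use a back-ref
        address = current_alloc - address;  // distance back from alloc point
      }
      address >>= kObjectAlignmentBits;     // drop always-zero low bits
      printf("%s %d\n", from_start ? "REF" : "BACKREF", address);
    }

    int main() {
      EncodeReference(0x5000, 0x1000);  // different pages -> REF 1024
      EncodeReference(0x5040, 0x5000);  // same page       -> BACKREF 16
      return 0;
    }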
diff --git a/src/serialize.h b/src/serialize.h
index ce3b006..8dd193f 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -147,7 +147,7 @@
return position_ == length_;
}
- int position() { return position_; }
+ const int position() { return position_; }
private:
const byte* data_;
@@ -185,14 +185,9 @@
f(14, 32) \
f(15, 36)
-// The Serializer/Deserializer class is a common superclass for Serializer and
-// Deserializer which is used to store common constants and methods used by
-// both.
-class SerializerDeserializer: public ObjectVisitor {
- public:
- static void Iterate(ObjectVisitor* visitor);
- static void SetSnapshotCacheSize(int size);
-
+// The SerDes class is a common superclass for Serializer and Deserializer
+// which is used to store common constants and methods used by both.
+class SerDes: public ObjectVisitor {
protected:
enum DataType {
RAW_DATA_SERIALIZATION = 0,
@@ -207,8 +202,7 @@
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
ROOT_SERIALIZATION = 39,
- PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
- // Free: 41-47.
+ // Free: 40-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
@@ -233,21 +227,17 @@
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
-
- static int partial_snapshot_cache_length_;
- static const int kPartialSnapshotCacheCapacity = 1024;
- static Object* partial_snapshot_cache_[];
};
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerializerDeserializer {
+class Deserializer: public SerDes {
public:
// Create a deserializer from a snapshot byte source.
explicit Deserializer(SnapshotByteSource* source);
- virtual ~Deserializer();
+ virtual ~Deserializer() { }
// Deserialize the snapshot into an empty heap.
void Deserialize();
@@ -259,6 +249,8 @@
virtual void Synchronize(const char* tag);
#endif
+ static void TearDown();
+
private:
virtual void VisitPointers(Object** start, Object** end);
@@ -280,7 +272,7 @@
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
// first object and the others will flow from that.
- List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
+ List<Address> pages_[SerDes::kNumberOfSpaces];
SnapshotByteSource* source_;
static ExternalReferenceDecoder* external_reference_decoder_;
@@ -308,62 +300,13 @@
};
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
- SerializationAddressMapper()
- : serialization_map_(new HashMap(&SerializationMatchFun)),
- no_allocation_(new AssertNoAllocation()) { }
-
- ~SerializationAddressMapper() {
- delete serialization_map_;
- delete no_allocation_;
- }
-
- bool IsMapped(HeapObject* obj) {
- return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
- }
-
- int MappedTo(HeapObject* obj) {
- ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
- }
-
- void AddMapping(HeapObject* obj, int to) {
- ASSERT(!IsMapped(obj));
- HashMap::Entry* entry =
- serialization_map_->Lookup(Key(obj), Hash(obj), true);
- entry->value = Value(to);
- }
-
- private:
- static bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-
- static void* Value(int v) {
- return reinterpret_cast<void*>(v);
- }
-
- HashMap* serialization_map_;
- AssertNoAllocation* no_allocation_;
- DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
-};
-
-
-class Serializer : public SerializerDeserializer {
+class Serializer : public SerDes {
public:
explicit Serializer(SnapshotByteSink* sink);
+ // Serialize the current state of the heap.
+ void Serialize();
+ // Serialize a single object and the objects reachable from it.
+ void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
// in each space.
@@ -384,20 +327,15 @@
// going on.
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
- SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
- protected:
+ private:
enum ReferenceRepresentation {
TAGGED_REPRESENTATION, // A tagged object reference.
CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
};
- static const int kInvalidRootIndex = -1;
- virtual int RootIndex(HeapObject* heap_object) = 0;
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
-
class ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer,
@@ -433,12 +371,7 @@
int bytes_processed_so_far_;
};
- virtual void SerializeObject(Object* o,
- ReferenceRepresentation representation) = 0;
- void SerializeReferenceToPreviousObject(
- int space,
- int address,
- ReferenceRepresentation reference_representation);
+ void SerializeObject(Object* o, ReferenceRepresentation representation);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
@@ -453,6 +386,8 @@
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
+ int RootIndex(HeapObject* heap_object);
+ static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
@@ -462,11 +397,11 @@
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
+ bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
int large_object_total_;
- SerializationAddressMapper address_mapper_;
friend class ObjectSerializer;
friend class Deserializer;
@@ -474,62 +409,6 @@
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
-
-class PartialSerializer : public Serializer {
- public:
- PartialSerializer(Serializer* startup_snapshot_serializer,
- SnapshotByteSink* sink)
- : Serializer(sink),
- startup_serializer_(startup_snapshot_serializer) {
- }
-
- // Serialize the objects reachable from a single object pointer.
- virtual void Serialize(Object** o);
- virtual void SerializeObject(Object* o,
- ReferenceRepresentation representation);
-
- protected:
- virtual int RootIndex(HeapObject* o);
- virtual int PartialSnapshotCacheIndex(HeapObject* o);
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- return o->IsString() || o->IsSharedFunctionInfo();
- }
-
- private:
- Serializer* startup_serializer_;
- DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
-};
-
-
-class StartupSerializer : public Serializer {
- public:
- explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
- // Clear the cache of objects used by the partial snapshot. After the
- // strong roots have been serialized we can create a partial snapshot
- // which will repopulate the cache with objects needed by that partial
- // snapshot.
- partial_snapshot_cache_length_ = 0;
- }
- // Serialize the current state of the heap. The order is:
- // 1) Strong references.
- // 2) Partial snapshot cache.
- // 3) Weak references (eg the symbol table).
- virtual void SerializeStrongReferences();
- virtual void SerializeObject(Object* o,
- ReferenceRepresentation representation);
- void SerializeWeakReferences();
- void Serialize() {
- SerializeStrongReferences();
- SerializeWeakReferences();
- }
-
- private:
- virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- return false;
- }
-};
-
} } // namespace v8::internal
#endif // V8_SERIALIZE_H_
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 1e81b8e..448c3fd 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -59,4 +59,42 @@
return false;
}
+
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+ virtual int Position() {
+ return ftell(fp_);
+ }
+
+ private:
+ FILE* fp_;
+};
+
+
+bool Snapshot::WriteToFile(const char* snapshot_file) {
+ FileByteSink file(snapshot_file);
+ Serializer ser(&file);
+ ser.Serialize();
+ return true;
+}
+
+
+
} } // namespace v8::internal
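FileByteSink above is the whole of the file-output path: the serializer only ever calls Put() one byte at a time, so a sink is trivial to implement. A minimal standalone analogue (the interface and output path are illustrative):

    #include <stdio.h>

    // A byte sink interface whose file-backed implementation streams
    // serializer output to disk.
    class ByteSink {
     public:
      virtual ~ByteSink() {}
      virtual void Put(int byte, const char* description) = 0;
    };

    class FileSink : public ByteSink {
     public:
      explicit FileSink(const char* path) : fp_(fopen(path, "wb")) {}
      virtual ~FileSink() { if (fp_ != NULL) fclose(fp_); }
      virtual void Put(int byte, const char* description) {
        (void)description;  // descriptions are for debugging/tracing only
        if (fp_ != NULL) fputc(byte, fp_);
      }
     private:
      FILE* fp_;
    };

    int main() {
      FileSink sink("/tmp/snapshot.bin");  // hypothetical output path
      const char payload[] = "v8";
      for (int i = 0; payload[i] != '\0'; i++)
        sink.Put(payload[i], "payload byte");
      return 0;
    }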
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 4fd8a6c..847bb9a 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -36,6 +36,32 @@
// -----------------------------------------------------------------------------
+// HeapObjectIterator
+
+bool HeapObjectIterator::has_next() {
+ if (cur_addr_ < cur_limit_) {
+ return true; // common case
+ }
+ ASSERT(cur_addr_ == cur_limit_);
+ return HasNextInNextPage(); // slow path
+}
+
+
+HeapObject* HeapObjectIterator::next() {
+ ASSERT(has_next());
+
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ ASSERT_OBJECT_SIZE(obj_size);
+
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_limit_);
+
+ return obj;
+}
+
+
+// -----------------------------------------------------------------------------
// PageIterator
bool PageIterator::has_next() {
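The has_next()/next() split introduced above keeps the common case cheap: has_next() is a bounds check within the current page (the page-crossing slow path is omitted here), and next() decodes the object under the cursor and bumps it by that object's own size. A toy sketch over fixed-size records:

    #include <stdio.h>

    struct Record { int size; int payload; };

    // Bump-pointer iteration: advance by each record's self-reported size.
    class RecordIterator {
     public:
      RecordIterator(char* start, char* limit) : cur_(start), limit_(limit) {}
      bool has_next() const { return cur_ < limit_; }  // common case only
      Record* next() {
        Record* r = reinterpret_cast<Record*>(cur_);
        cur_ += r->size;  // bump the cursor past the current record
        return r;
      }
     private:
      char* cur_;
      char* limit_;
    };

    int main() {
      Record area[3] = {{sizeof(Record), 1}, {sizeof(Record), 2},
                        {sizeof(Record), 3}};
      char* base = reinterpret_cast<char*>(area);
      RecordIterator it(base, base + sizeof(area));
      while (it.has_next()) printf("%d\n", it.next()->payload);
      return 0;
    }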
diff --git a/src/spaces.cc b/src/spaces.cc
index 2850900..cd09398 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -82,8 +82,8 @@
}
-HeapObject* HeapObjectIterator::FromNextPage() {
- if (cur_addr_ == end_addr_) return NULL;
+bool HeapObjectIterator::HasNextInNextPage() {
+ if (cur_addr_ == end_addr_) return false;
Page* cur_page = Page::FromAllocationTop(cur_addr_);
cur_page = cur_page->next_page();
@@ -92,12 +92,12 @@
cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
- if (cur_addr_ == end_addr_) return NULL;
+ if (cur_addr_ == end_addr_) return false;
ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
Verify();
#endif
- return FromCurrentPage();
+ return true;
}
@@ -1437,8 +1437,7 @@
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- RecordAllocation(obj);
+ while (it.has_next()) RecordAllocation(it.next());
}
@@ -2055,7 +2054,8 @@
// - by code comment
void PagedSpace::CollectCodeStatistics() {
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+ while (obj_it.has_next()) {
+ HeapObject* obj = obj_it.next();
if (obj->IsCode()) {
Code* code = Code::cast(obj);
code_kind_statistics[code->kind()] += code->Size();
@@ -2157,8 +2157,7 @@
ClearHistograms();
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
- CollectHistogramInfo(obj);
+ while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
ReportHistogram(true);
}
@@ -2394,8 +2393,7 @@
ClearHistograms();
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
- CollectHistogramInfo(obj);
+ while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
ReportHistogram(false);
}
@@ -2464,8 +2462,7 @@
HeapObject* LargeObjectIterator::next() {
- if (current_ == NULL) return NULL;
-
+ ASSERT(has_next());
HeapObject* object = current_->GetObject();
current_ = current_->next();
return object;
@@ -2642,7 +2639,8 @@
ASSERT(Page::is_rset_in_use());
LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ while (it.has_next()) {
+ HeapObject* object = it.next();
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays need remembered set support.
if (object->IsFixedArray()) {
@@ -2670,10 +2668,11 @@
30);
LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ while (it.has_next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
+ HeapObject* object = it.next();
if (object->IsFixedArray()) {
// Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
@@ -2719,7 +2718,9 @@
}
// Free the chunk.
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ if (object->IsCode()) {
+ LOG(CodeDeleteEvent(object->address()));
+ }
size_ -= static_cast<int>(chunk_size);
page_count_--;
MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
@@ -2799,8 +2800,8 @@
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
- obj->Print();
+ while (it.has_next()) {
+ it.next()->Print();
}
}
@@ -2810,9 +2811,9 @@
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ while (it.has_next()) {
num_objects++;
- CollectHistogramInfo(obj);
+ CollectHistogramInfo(it.next());
}
PrintF(" number of objects %d\n", num_objects);
@@ -2822,7 +2823,8 @@
void LargeObjectSpace::CollectCodeStatistics() {
LargeObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+ while (obj_it.has_next()) {
+ HeapObject* obj = obj_it.next();
if (obj->IsCode()) {
Code* code = Code::cast(obj);
code_kind_statistics[code->kind()] += code->Size();
@@ -2833,7 +2835,8 @@
void LargeObjectSpace::PrintRSet() {
LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ while (it.has_next()) {
+ HeapObject* object = it.next();
if (object->IsFixedArray()) {
Page* page = Page::FromAddress(object->address());
diff --git a/src/spaces.h b/src/spaces.h
index f7a0439..4786fb4 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -597,14 +597,15 @@
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
-// NOTE: The space specific object iterators also implement their own next()
-// method, which is used to avoid virtual function calls when
+// NOTE: The space specific object iterators also implement their own has_next()
+// and next() methods, which are used to avoid virtual function calls when
// iterating a specific space.
class ObjectIterator : public Malloced {
public:
virtual ~ObjectIterator() { }
+ virtual bool has_next_object() = 0;
virtual HeapObject* next_object() = 0;
};
@@ -644,11 +645,11 @@
Address start,
HeapObjectCallback size_func);
- inline HeapObject* next() {
- return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
- }
+ inline bool has_next();
+ inline HeapObject* next();
// implementation of ObjectIterator.
+ virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
@@ -658,21 +659,9 @@
HeapObjectCallback size_func_; // size function
Page* end_page_; // caches the page of the end address
- HeapObject* FromCurrentPage() {
- ASSERT(cur_addr_ < cur_limit_);
-
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- ASSERT_OBJECT_SIZE(obj_size);
-
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_limit_);
-
- return obj;
- }
-
- // Slow path of next, goes into the next page.
- HeapObject* FromNextPage();
+ // Slow path of has_next, checks whether there are more objects in
+ // the next page.
+ bool HasNextInNextPage();
// Initializes fields.
void Initialize(Address start, Address end, HeapObjectCallback size_func);
@@ -993,18 +982,6 @@
return Page::FromAllocationTop(alloc_info.limit);
}
- int CountPagesToTop() {
- Page* p = Page::FromAllocationTop(allocation_info_.top);
- PageIterator it(this, PageIterator::ALL_PAGES);
- int counter = 1;
- while (it.has_next()) {
- if (it.next() == p) return counter;
- counter++;
- }
- UNREACHABLE();
- return -1;
- }
-
 // Expands the space by allocating a fixed number of pages. Returns false if
 // it cannot allocate the requested number of pages from the OS. Newly
 // allocated pages are appended to the last_page;
@@ -1217,8 +1194,10 @@
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
SemiSpaceIterator(NewSpace* space, Address start);
+ bool has_next() { return current_ < limit_; }
+
HeapObject* next() {
- if (current_ == limit_) return NULL;
+ ASSERT(has_next());
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1228,6 +1207,7 @@
}
// Implementation of the ObjectIterator functions.
+ virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
@@ -1773,11 +1753,8 @@
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
- : FixedSpace(max_capacity, id, Map::kSize, "map"),
- max_map_space_pages_(max_map_space_pages) {
- ASSERT(max_map_space_pages < kMaxMapPageIndex);
- }
+ MapSpace(int max_capacity, AllocationSpace id)
+ : FixedSpace(max_capacity, id, Map::kSize, "map") {}
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -1785,21 +1762,24 @@
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
- static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
+ // Constants.
+ static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
// Are map pointers encodable into map word?
bool MapPointersEncodable() {
if (!FLAG_use_big_map_space) {
- ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
+ ASSERT(CountTotalPages() <= kMaxMapPageIndex);
return true;
}
- return CountPagesToTop() <= max_map_space_pages_;
+ int n_of_pages = Capacity() / Page::kObjectAreaSize;
+ ASSERT(n_of_pages == CountTotalPages());
+ return n_of_pages <= kMaxMapPageIndex;
}
// Should be called after forced sweep to find out if map space needs
// compaction.
bool NeedsCompaction(int live_maps) {
- return !MapPointersEncodable() && live_maps <= CompactionThreshold();
+ return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
}
Address TopAfterCompaction(int live_maps) {
@@ -1858,14 +1838,10 @@
static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
// Do map space compaction if there is a page gap.
- int CompactionThreshold() {
- return kMapsPerPage * (max_map_space_pages_ - 1);
- }
-
- const int max_map_space_pages_;
+ static const int kCompactionThreshold = kMapsPerPage * (kMaxMapPageIndex - 1);
// An array of page start address in a map space.
- Address page_addresses_[kMaxMapPageIndex];
+ Address page_addresses_[kMaxMapPageIndex + 1];
public:
TRACK_MEMORY("MapSpace")
@@ -2060,9 +2036,11 @@
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
+ bool has_next() { return current_ != NULL; }
HeapObject* next();
// implementation of ObjectIterator.
+ virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
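
The MapSpace change replaces the page-counting check with pure arithmetic: the space's capacity divided by the per-page object area bounds the page count, and that bound must fit in the map word's page index field. A back-of-the-envelope sketch with made-up constants (the real values come from Page::kObjectAreaSize, Map::kSize and MapWord::kMapPageIndexBits):

#include <cassert>

int main() {
  const int kMapPageIndexBits = 10;                 // assumed field width
  const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
  const int kObjectAreaSize = 8 * 1024;             // assumed page payload
  const int capacity = 4 * 1024 * 1024;             // assumed space capacity
  int n_of_pages = capacity / kObjectAreaSize;      // pages the space can hold
  // Map pointers are encodable iff every page index fits in the field.
  assert(n_of_pages <= kMaxMapPageIndex);
  return 0;
}
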
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 7397c30..fb1e926 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -60,44 +60,40 @@
// lines) rather than one macro (of length about 80 lines) to work around
// this problem. Please avoid using recursive macros of this length when
// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(symbol_table_capacity, V8.SymbolTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize) \
- /* Amount of source code compiled with the old codegen. */ \
- SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
- /* Amount of source code compiled with the full codegen. */ \
- SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
+#define STATS_COUNTER_LIST_1(SC) \
+ /* Global Handle Count*/ \
+ SC(global_handles, V8.GlobalHandles) \
+ /* Mallocs from PCRE */ \
+ SC(pcre_mallocs, V8.PcreMallocCount) \
+ /* OS Memory allocated */ \
+ SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
+ SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
+ SC(symbol_table_capacity, V8.SymbolTableCapacity) \
+ SC(number_of_symbols, V8.NumberOfSymbols) \
+ SC(script_wrappers, V8.ScriptWrappers) \
+ SC(call_initialize_stubs, V8.CallInitializeStubs) \
+ SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
+ SC(call_normal_stubs, V8.CallNormalStubs) \
+ SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
+ SC(arguments_adaptors, V8.ArgumentsAdaptors) \
+ SC(compilation_cache_hits, V8.CompilationCacheHits) \
+ SC(compilation_cache_misses, V8.CompilationCacheMisses) \
+ SC(regexp_cache_hits, V8.RegExpCacheHits) \
+ SC(regexp_cache_misses, V8.RegExpCacheMisses) \
+ /* Amount of evaled source code. */ \
+ SC(total_eval_size, V8.TotalEvalSize) \
+ /* Amount of loaded source code. */ \
+ SC(total_load_size, V8.TotalLoadSize) \
+ /* Amount of parsed source code. */ \
+ SC(total_parse_size, V8.TotalParseSize) \
+ /* Amount of source code skipped over using preparsing. */ \
+ SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Amount of compiled source code. */ \
+ SC(total_compile_size, V8.TotalCompileSize)
#define STATS_COUNTER_LIST_2(SC) \
diff --git a/src/v8.cc b/src/v8.cc
index 3bec827..db570a4 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -146,6 +146,7 @@
Heap::TearDown();
Logger::TearDown();
+ Deserializer::TearDown();
is_running_ = false;
has_been_disposed_ = true;
diff --git a/src/v8natives.js b/src/v8natives.js
index 7475065..3dcf430 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -197,7 +197,7 @@
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- return "[object " + %_ClassOf(ToObject(this)) + "]";
+ return "[object " + %_ClassOf(this) + "]";
}
@@ -209,7 +209,7 @@
// ECMA-262 - 15.2.4.4
function ObjectValueOf() {
- return ToObject(this);
+ return this;
}
diff --git a/src/version.cc b/src/version.cc
index ab2eab3..2724f6e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 1
-#define BUILD_NUMBER 0
+#define MINOR_VERSION 0
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9cfe98a..4ac3933 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1537,40 +1537,6 @@
}
-void Assembler::repmovsb() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsw() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66); // Operand size override.
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsl() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0xF3);
- emit_rex_64();
- emit(0xA5);
-}
-
-
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
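
The removed repmovs* helpers emit the classic x86 string-move forms: an F3 (REP) prefix in front of MOVSB (A4) or MOVS{W,D,Q} (A5), with 0x66 selecting 16-bit operands and REX.W selecting 64-bit ones. Note that the removed repmovsw() emitted F3 A4 after the 0x66 prefix, which appears to encode REP MOVSB (A4 is the byte-sized move); 66 F3 A5 would be REP MOVSW. A sketch of the byte sequences (simplified buffer handling, not the assembler's emit path):

#include <cstdint>
#include <cstdio>
#include <vector>

// Appends the encoding of REP MOVS for the given element width in bytes.
static void EmitRepMovs(std::vector<uint8_t>* code, int width) {
  switch (width) {
    case 1: code->push_back(0xF3); code->push_back(0xA4); break;  // REP MOVSB
    case 2: code->push_back(0x66);                   // operand-size override
            code->push_back(0xF3); code->push_back(0xA5); break;  // REP MOVSW
    case 4: code->push_back(0xF3); code->push_back(0xA5); break;  // REP MOVSD
    case 8: code->push_back(0xF3); code->push_back(0x48);         // REX.W
            code->push_back(0xA5); break;                         // REP MOVSQ
  }
}

int main() {
  std::vector<uint8_t> code;
  EmitRepMovs(&code, 8);
  for (size_t i = 0; i < code.size(); i++) std::printf("%02X ", code[i]);
  std::printf("\n");  // prints: F3 48 A5
  return 0;
}
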
@@ -2113,16 +2079,6 @@
}
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(1, adr);
-}
-
-
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 5d17edf..1bddb2f 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -574,13 +574,6 @@
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
- // Repeated moves.
-
- void repmovsb();
- void repmovsw();
- void repmovsl();
- void repmovsq();
-
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -1059,7 +1052,6 @@
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
void fabs();
void fchs();
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 1a0138f..0cf68eb 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -224,17 +224,20 @@
Register lhs,
Register rhs);
+ // Code pattern for loading a floating point value and converting it
+ // to a 32 bit integer. Input value must be either a smi or a heap number
+ // object.
+ // Returns the operand as a 32-bit sign extended integer in a general
+ // purpose register.
+ static void LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst);
+
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in rax, operand_2 in rdx; falls through on float or smi
// operands, jumps to the non_float label otherwise.
static void CheckNumberOperands(MacroAssembler* masm,
Label* non_float);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* operand_conversion_failure);
};
@@ -651,29 +654,20 @@
}
-void CodeGenerator::CallApplyLazy(Expression* applicand,
+void CodeGenerator::CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- // Load applicand.apply onto the stack. This will usually
+ JumpTarget slow, done;
+
+ // Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
+ Reference ref(this, apply);
+ ref.GetValue();
+ ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -683,11 +677,6 @@
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -695,149 +684,143 @@
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
- probe.Unuse();
- __ j(not_equal, &slow);
- }
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ Cmp(probe.reg(), Factory::the_hole_value());
+ probe.Unuse();
+ slow.Branch(not_equal);
+ }
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
+ if (try_lazy) {
+ JumpTarget build_args;
- // Check that the receiver really is a JavaScript object.
- __ movq(rax, Operand(rsp, 0));
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
+ // Get rid of the arguments object probe.
+ frame_->Drop();
+
+ // Before messing with the execution stack, we sync all
+ // elements. This is bound to happen anyway because we're
+ // about to call a function.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Check that the receiver really is a JavaScript object.
+ {
+ frame_->PushElementAt(0);
+ Result receiver = frame_->Pop();
+ receiver.ToRegister();
+ Condition is_smi = masm_->CheckSmi(receiver.reg());
+ build_args.Branch(is_smi);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ movq(rax, Operand(rsp, kPointerSize));
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- is_smi = masm_->CheckSmi(rdi);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ movq(rax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, rax);
- __ cmpq(rax, Immediate(kArgumentsLimit));
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // rcx is a small non-negative integer, due to the test above.
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rax);
- // Stack now has 1 element:
- // rsp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we have jumped from where:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
+ __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ build_args.Branch(below);
}
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+ // Verify that we're invoking Function.prototype.apply.
+ {
+ frame_->PushElementAt(1);
+ Result apply = frame_->Pop();
+ apply.ToRegister();
+ Condition is_smi = masm_->CheckSmi(apply.reg());
+ build_args.Branch(is_smi);
+ Result tmp = allocator_->Allocate();
+ __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ build_args.Branch(not_equal);
+ __ movq(tmp.reg(),
+ FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ apply_code);
+ build_args.Branch(not_equal);
+ }
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // rsp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
+ // Get the function receiver from the stack. Check that it
+ // really is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ Condition is_smi = masm_->CheckSmi(rdi);
+ build_args.Branch(is_smi);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ build_args.Branch(not_equal);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ movq(rax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(rax, rax);
+ __ movq(rcx, rax);
+ __ cmpq(rax, Immediate(kArgumentsLimit));
+ build_args.Branch(above);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function. The virtual frame knows about the receiver
+ // so make sure to forget that explicitly.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ frame_->Forget(1);
+ Result result = allocator()->Allocate(rax);
+ frame_->SetElementAt(0, &result);
+ done.Jump();
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall through to the slow case where we call
+ // Function.prototype.apply.
+ build_args.Bind();
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->Push(&arguments_object);
+ slow.Bind();
+ }
+
+ // Flip the apply function and the function to call on the stack, so
+ // the function looks like the receiver of the apply call. This way,
+ // the generic Function.prototype.apply implementation can deal with
+ // the call like it usually does.
+ Result a2 = frame_->Pop();
+ Result a1 = frame_->Pop();
+ Result ap = frame_->Pop();
+ Result fn = frame_->Pop();
+ frame_->Push(&ap);
+ frame_->Push(&fn);
+ frame_->Push(&a1);
+ frame_->Push(&a2);
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ frame_->Push(&res);
+
+ // All done. Restore context register after call.
+ if (try_lazy) done.Bind();
frame_->RestoreContextRegister();
}
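
The CallApplyLazy rewrite above is easiest to follow from the stack shapes in the comments: the generic path ends by swapping the applicand and applicand.apply so that apply is called with the applicand as its receiver. A toy model of that final shuffle, with strings standing in for virtual-frame elements:

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Bottom -> top, as left on the frame by the slow path above.
  std::vector<std::string> frame = {"fn", "fn.apply", "receiver", "args"};
  std::string a2 = frame.back(); frame.pop_back();
  std::string a1 = frame.back(); frame.pop_back();
  std::string ap = frame.back(); frame.pop_back();
  std::string fn = frame.back(); frame.pop_back();
  frame.push_back(ap);  // apply becomes the callee of the stub call...
  frame.push_back(fn);  // ...and the applicand becomes its receiver
  frame.push_back(a1);
  frame.push_back(a2);
  assert(frame[0] == "fn.apply" && frame[1] == "fn");
  return 0;
}
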
@@ -1834,20 +1817,28 @@
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2); // Drop the original and the copy of the element.
- } else {
- // If the reference has size zero then we can use the value below
- // the reference as if it were above the reference, instead of pushing
- // a new copy of it above the reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(); // Drop the original of the element.
+ }
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (e.g., ebx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ if (each.size() > 0) {
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // i.e., now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ frame_->Drop();
}
}
}
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Drop();
+
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2558,7 +2549,7 @@
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target(), node->is_compound());
+ { Reference target(this, node->target());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2580,27 +2571,12 @@
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
- if (node->ends_initialization_block()) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- if (target.type() == Reference::NAMED) {
- frame_->Dup();
- // Dup target receiver on stack.
- } else {
- ASSERT(target.type() == Reference::KEYED);
- Result temp = frame_->Pop();
- frame_->Dup();
- frame_->Push(&temp);
- }
- }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else { // Assignment is a compound assignment.
+ } else {
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2626,7 +2602,6 @@
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2638,15 +2613,13 @@
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::UNLOADED);
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment.
- // Swap the receiver and the value of the assignment expression.
- Result lhs = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&lhs);
- frame_->Push(&receiver);
+ // argument to the runtime call is the receiver, which is the
+ // first value pushed as part of the reference, which is below
+ // the lhs value.
+ frame_->PushElementAt(target.size());
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
@@ -2814,7 +2787,7 @@
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
+ CallApplyLazy(property,
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -2846,24 +2819,16 @@
// -------------------------------------------
// Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ ref.GetValue();
+
+ // Pass receiver to called function.
if (property->is_synthetic()) {
- Reference ref(this, property, false);
- ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- Reference ref(this, property, false);
- ASSERT(ref.size() == 2);
- Result key = frame_->Pop();
- frame_->Dup(); // Duplicate the receiver.
- frame_->Push(&key);
- ref.GetValue();
- // Top of frame contains function to call, with duplicate copy of
- // receiver below it. Swap them.
- Result function = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&function);
- frame_->Push(&receiver);
+ // The reference's size is non-negative.
+ frame_->PushElementAt(ref.size());
}
// Call the function.
@@ -3047,9 +3012,6 @@
}
} else {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Load(node->expression());
switch (op) {
case Token::NOT:
@@ -3059,6 +3021,9 @@
break;
case Token::SUB: {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
@@ -3077,10 +3042,10 @@
Condition is_smi = masm_->CheckSmi(operand.reg());
smi_label.Branch(is_smi, &operand);
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- Result answer = frame_->CallStub(&stub, &operand);
+ frame_->Push(&operand); // undo popping of TOS
+ Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+ CALL_FUNCTION, 1);
continue_label.Jump(&answer);
-
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
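
With the stub gone, BIT_NOT's non-smi case lands in the generic builtin, which conceptually performs JavaScript's ToInt32 and then a complement. A rough C++ model of that contract (the ToInt32 here is a simplified truncate-and-wrap; some double edge cases are glossed over):

#include <cassert>
#include <cstdint>
#include <cmath>

// Simplified ToInt32: truncate toward zero, then wrap modulo 2^32.
static int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;                  // NaN/Inf map to 0
  double t = std::trunc(d);
  double m = std::fmod(t, 4294967296.0);            // (-2^32, 2^32)
  if (m < 0) m += 4294967296.0;                     // [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  assert(~ToInt32(5.9) == -6);                 // ~5
  assert(~ToInt32(-1.0) == 0);                 // ~(-1)
  assert(~ToInt32(4294967296.0 + 3.0) == -4);  // wraps to 3 first, then ~3
  return 0;
}
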
@@ -3202,9 +3167,7 @@
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- // A constant reference is not saved to, so the reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
+ { Reference target(this, node->expression());
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -3659,22 +3622,6 @@
}
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
-
-
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -3979,8 +3926,7 @@
Load(args->at(1));
Load(args->at(2));
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
+ Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
frame_->Push(&answer);
}
@@ -4293,19 +4239,14 @@
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
+ cgen_->UnloadReference(this);
}
@@ -4355,7 +4296,6 @@
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
- ref->set_unloaded();
}
@@ -5074,6 +5014,31 @@
return;
}
+ // Set the flags based on the operation, type and loop nesting level.
+ GenericBinaryFlags flags;
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ flags = (loop_nesting() > 0)
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
+ break;
+
+ default:
+ // By default only inline the Smi check code for likely smis if this
+ // operation is part of a loop.
+ flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
+ break;
+ }
+
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -5107,6 +5072,7 @@
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+ bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -5115,35 +5081,34 @@
if (FoldConstantSmis(op, left_int, right_int)) return;
}
- Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Go straight to the slow case, with no smi code
+ // Set flag so that we go straight to the slow case, with no smi code.
+ generate_no_smi_code = true;
+ } else if (right_is_smi) {
+ ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
+ return;
+ } else if (left_is_smi) {
+ ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
+ return;
+ }
+
+ if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
+ LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
frame_->Push(&left);
frame_->Push(&right);
- GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else if (right_is_smi) {
- answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- } else if (left_is_smi) {
- answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
- } else {
- frame_->Push(&left);
- frame_->Push(&right);
- GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
- answer = frame_->CallStub(&stub, 2);
+ // If we know the arguments aren't smis, use the binary operation stub
+ // that does not check for the fast smi case.
+ // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
+ if (generate_no_smi_code) {
+ flags = NO_SMI_CODE_IN_STUB;
}
+ GenericBinaryOpStub stub(op, overwrite_mode, flags);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
}
- frame_->Push(&answer);
}
@@ -5224,12 +5189,12 @@
}
-Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
@@ -5240,19 +5205,20 @@
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ LikelySmiBinaryOperation(op, &unsafe_operand, operand,
overwrite_mode);
} else {
- return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ LikelySmiBinaryOperation(op, operand, &unsafe_operand,
overwrite_mode);
}
+ ASSERT(!operand->is_valid());
+ return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
- Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -5273,15 +5239,15 @@
smi_value,
deferred->entry_label());
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
break;
}
case Token::SUB: {
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
@@ -5295,7 +5261,7 @@
smi_value,
deferred->entry_label());
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
}
break;
}
@@ -5303,8 +5269,8 @@
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5322,21 +5288,21 @@
operand->reg(),
shift_value);
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- answer = allocator()->Allocate();
+ Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5351,14 +5317,15 @@
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
+ frame_->Push(&answer);
}
break;
case Token::SHL:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5375,10 +5342,10 @@
overwrite_mode);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
} else {
// Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
+ Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5393,6 +5360,7 @@
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
+ frame_->Push(&answer);
}
}
break;
@@ -5427,7 +5395,7 @@
}
}
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
break;
}
@@ -5455,7 +5423,7 @@
Smi::FromInt(int_value - 1));
}
deferred->BindExit();
- answer = *operand;
+ frame_->Push(operand);
break; // This break only applies if we generated code for MOD.
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -5464,24 +5432,22 @@
default: {
Result constant_operand(value);
if (reversed) {
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(answer.is_valid());
- return answer;
+ ASSERT(!operand->is_valid());
}
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- Result answer;
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need rax as the quotient register, rdx as the remainder
@@ -5563,17 +5529,16 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- answer = quotient;
+ frame_->Push(&quotient);
} else {
ASSERT(op == Token::MOD);
__ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
- answer = remainder;
+ frame_->Push(&remainder);
}
- ASSERT(answer.is_valid());
- return answer;
+ return;
}
// Special handling of shift operations because they use fixed
@@ -5594,7 +5559,7 @@
frame_->Spill(rcx);
// Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
+ Result answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -5633,8 +5598,8 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
+ frame_->Push(&answer);
+ return;
}
// Handle the other binary operations.
@@ -5643,7 +5608,7 @@
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- answer = allocator_->Allocate();
+ Result answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -5697,122 +5662,7 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
- Comment cmnt(masm_, "[ Load from keyed Property");
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use
- // root array to load null_value, since it must be patched with
- // the expected receiver map.
- masm_->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is a non-negative smi.
- __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ SmiToInteger32(index.reg(), key.reg());
- __ cmpl(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // The index register holds the un-smi-tagged key. It has been
- // zero-extended to 64-bits, so it can be used directly as index in the
- // operand below.
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, then we can reuse that one because the value
- // coming from the deferred code will be in rax.
- Result value = index;
- __ movq(value.reg(),
- Operand(elements.reg(),
- index.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- frame_->Push(&receiver);
- frame_->Push(&key);
- return value;
-
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = frame_->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- return answer;
- }
+ frame_->Push(&answer);
}
@@ -5945,18 +5795,119 @@
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- Result value = cgen_->EmitKeyedLoad(is_global);
- cgen_->frame()->Push(&value);
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (cgen_->loop_nesting() > 0) {
+ Comment cmnt(masm, "[ Inlined load from keyed Property");
+
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = cgen_->allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = cgen_->allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ SmiToInteger32(index.reg(), key.reg());
+ __ cmpl(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // The index register holds the un-smi-tagged key. It has been
+ // zero-extended to 64-bits, so it can be used directly as index in the
+ // operand below.
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ Result value = index;
+ __ movq(value.reg(),
+ Operand(elements.reg(),
+ index.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+
+ } else {
+ Comment cmnt(masm, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
default:
UNREACHABLE();
}
-
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
}
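
The inlined keyed load moved above hinges on a map check compiled against a deliberately invalid value (Factory::null_value()) that the IC machinery later patches to the real receiver map. A toy model of that patching idea, with plain pointers standing in for patched machine code:

#include <cassert>
#include <cstddef>

struct Map {};
struct Object { const Map* map; int elements[4]; };

// "Patched" expected map; starts as a deliberately invalid value (the real
// code embeds Factory::null_value() and patches the instruction stream).
static const Map* expected_map = NULL;

static bool FastKeyedLoad(const Object* obj, int index, int* out) {
  if (obj->map != expected_map) return false;   // miss -> deferred slow path
  *out = obj->elements[index];                  // hit -> inline fast path
  return true;
}

int main() {
  Map m;
  Object o = { &m, {10, 20, 30, 40} };
  int v;
  assert(!FastKeyedLoad(&o, 1, &v));  // first run misses (map not yet patched)
  expected_map = &m;                  // IC "patches" the expected map
  assert(FastKeyedLoad(&o, 1, &v) && v == 20);
  return 0;
}
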
@@ -5993,9 +5944,6 @@
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
}
@@ -6124,7 +6072,6 @@
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -6266,17 +6213,19 @@
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+ // TODO(X64): This method is identical to the ia32 version.
+ // Either find a reason to change it, or move it somewhere where it can be
+ // shared. (Notice: It assumes that a Smi can fit in an int).
+
Object* answer_object = Heap::undefined_value();
switch (op) {
case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
+ if (Smi::IsValid(left + right)) {
answer_object = Smi::FromInt(left + right);
}
break;
case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
+ if (Smi::IsValid(left - right)) {
answer_object = Smi::FromInt(left - right);
}
break;
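
This hunk trades the intptr_t widening back for plain int arithmetic (the restored TODO records the smi-fits-in-an-int assumption). The widening mattered because x64 smis can hold any 32-bit value — an assumption of this sketch — so the sum of two valid smi operands can wrap a 32-bit int before Smi::IsValid() ever sees it. A minimal illustration, assuming a 64-bit host:

#include <cassert>
#include <cstdint>

// Hypothetical x64-style validity check: smis hold any 32-bit integer.
static bool SmiIsValid(intptr_t value) {
  return value >= INT32_MIN && value <= INT32_MAX;
}

int main() {
  int left = INT32_MAX;   // a valid smi under this model
  int right = 1;          // likewise
  // Adding in 32-bit arithmetic would wrap to INT32_MIN and wrongly pass
  // the range check; widening first preserves the true sum.
  assert(!SmiIsValid(static_cast<intptr_t>(left) + right));
  return 0;
}
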
@@ -6350,216 +6299,56 @@
// End of CodeGenerator implementation.
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
- Label done, right_exponent, normal_exponent;
- Register scratch = rbx;
- Register scratch2 = rdi;
- // Get exponent word.
- __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ movl(scratch2, scratch);
- __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmpl(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(rsp, 0));
- __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
- __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load rcx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(rcx, rcx);
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmpl(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmpl(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ movl(scratch2, scratch);
- __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, Immediate(big_shift_distance));
- // Get the second half of the double.
- __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(rcx, Immediate(32 - big_shift_distance));
- __ or_(rcx, scratch2);
- // We have the answer in rcx, but we may need to negate it.
- __ testl(scratch, scratch);
- __ j(positive, &done);
- __ neg(rcx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in rcx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ subl(scratch2, Immediate(zero_exponent));
- // rcx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
- __ movl(rcx, Immediate(30));
- __ subl(rcx, scratch2);
-
- __ bind(&right_exponent);
- // Here rcx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, Immediate(shift_distance));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits of the low
- // mantissa word.
- __ shr(scratch2, Immediate(32 - shift_distance));
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to rcx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(rcx, rcx);
- __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ movl(rcx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ subl(rcx, scratch2);
- __ bind(&done);
- }
-}
-
-
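The IntegerConvert fast path removed above works directly on the IEEE-754 bit
layout of the heap number: unbias the exponent, bail out above 31, reattach the
implicit mantissa bit, and shift it into place. A minimal portable C++ sketch of
the same conversion (the function name and failure convention are illustrative,
not from the V8 sources):

    #include <cstdint>
    #include <cstring>

    // Returns false where the generated code jumps to conversion_failure
    // (exponent above 31: infinities, NaNs, doubles outside uint32 range).
    static bool DoubleToInt32Bits(double value, int32_t* result) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // Reinterpret the double.
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) {  // Magnitude below 1.0 rounds to zero.
        *result = 0;
        return true;
      }
      if (exponent > 31) return false;
      // Put back the implicit 1 above the 52 stored mantissa bits.
      uint64_t mantissa =
          (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
      // ToInt32 wraps modulo 2^32, so negate in unsigned arithmetic.
      *result = static_cast<int32_t>((bits >> 63) ? 0u - magnitude : magnitude);
      return true;
    }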
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
+ ASSERT(op_ == Token::SUB);
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
+ Label slow;
+ Label done;
+ Label try_float;
+ // Check whether the value is a smi.
+ __ JumpIfNotSmi(rax, &try_float);
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
- // Either zero or Smi::kMinValue, neither of which becomes a smi when
- // negated.
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
+ // Either zero or Smi::kMinValue, neither of which becomes a smi when negated.
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(rcx);
- // Tag the result as a smi and we're done.
- ASSERT(kSmiTagSize == 1);
- __ Integer32ToSmi(rax, rcx);
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
+ // Enter runtime system.
__ bind(&slow);
__ pop(rcx); // pop return address
__ push(rax);
__ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ __ jmp(&done);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ Cmp(rdx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
}
+
+ __ bind(&done);
+ __ StubReturn(1);
}
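The float branch above negates without touching the FPU: IEEE-754 negation is a
pure sign-bit flip, with no rounding and no special cases for zeros, infinities,
or NaNs. The same trick in plain C++ (illustrative only):

    #include <cstdint>
    #include <cstring>

    static double NegateByBitFlip(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      bits ^= uint64_t{1} << 63;  // The 0x01 << 63 mask built above.
      std::memcpy(&value, &bits, sizeof value);
      return value;  // NegateByBitFlip(0.0) yields -0.0, as the stub requires.
    }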
@@ -7508,6 +7297,15 @@
}
+void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst) {
+ // TODO(X64): Convert number operands to int32 values.
+ // Don't convert a Smi to a double first.
+ UNIMPLEMENTED();
+}
+
+
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
Label load_smi_1, load_smi_2, done_load_1, done;
__ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
@@ -7537,61 +7335,6 @@
}
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- IntegerConvert(masm, rdx, use_sse3, conversion_failure);
- __ movl(rdx, rcx);
-
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rax, use_sse3, conversion_failure);
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
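The conversion policy LoadAsIntegers encodes, restated: smis pass straight
through, undefined converts to zero per ECMA-262 section 9.5, heap numbers go
through the int32 truncation, and anything else takes the conversion_failure
path. A sketch with stand-in types (Tag and Value are illustrative, not V8
classes; DoubleToInt32Bits is the sketch shown earlier):

    #include <cstdint>

    enum class Tag { kSmi, kUndefined, kHeapNumber, kOther };
    struct Value {
      Tag tag;
      int32_t smi;
      double number;
    };

    // Mirrors one operand's handling in LoadAsIntegers; false means the
    // generated code would jump to conversion_failure.
    static bool ToBitOpOperand(const Value& v, int32_t* out) {
      switch (v.tag) {
        case Tag::kSmi:
          *out = v.smi;  // SmiToInteger32: already an integer.
          return true;
        case Tag::kUndefined:
          *out = 0;  // ECMA-262, section 9.5.
          return true;
        case Tag::kHeapNumber:
          return DoubleToInt32Bits(v.number, out);  // Untag and truncate.
        default:
          return false;  // Not a number: let the runtime sort it out.
      }
    }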
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -7832,7 +7575,7 @@
case Token::SHL:
case Token::SHR:
case Token::SAR:
- // Move the second operand into register rcx.
+ // Move the second operand into register rcx.
__ movq(rcx, rbx);
// Perform the operation.
switch (op_) {
@@ -7928,8 +7671,44 @@
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label skip_allocation, non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+ FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+ // TODO(X64): Don't convert a Smi to float and then back to int32
+ // afterwards.
+ FloatingPointHelper::LoadFloatOperands(masm);
+
+ Label skip_allocation, non_smi_result, operand_conversion_failure;
+
+ // Reserve space for converted numbers.
+ __ subq(rsp, Immediate(2 * kPointerSize));
+
+ if (use_sse3_) {
+ // Truncate the operands to 32-bit integers and check for
+ // exceptions in doing so.
+ CpuFeatures::Scope scope(SSE3);
+ __ fisttp_s(Operand(rsp, 0 * kPointerSize));
+ __ fisttp_s(Operand(rsp, 1 * kPointerSize));
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(1));
+ __ j(not_zero, &operand_conversion_failure);
+ } else {
+ // Check if right operand is int32.
+ __ fist_s(Operand(rsp, 0 * kPointerSize));
+ __ fild_s(Operand(rsp, 0 * kPointerSize));
+ __ FCmp();
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+
+ // Check if left operand is int32.
+ __ fist_s(Operand(rsp, 1 * kPointerSize));
+ __ fild_s(Operand(rsp, 1 * kPointerSize));
+ __ FCmp();
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+ }
+
+ // Get int32 operands and perform bitop.
+ __ pop(rcx);
+ __ pop(rax);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -7977,6 +7756,22 @@
GenerateReturn(masm);
}
+ // Clear the FPU exception flag and reset the stack before calling
+ // the runtime system.
+ __ bind(&operand_conversion_failure);
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ if (use_sse3_) {
+ // If we've used the SSE3 instructions for truncating the
+ // floating point values to integers and it failed, we have a
+ // pending #IA exception. Clear it.
+ __ fnclex();
+ } else {
+ // The non-SSE3 variant does early bailout if the right
+ // operand isn't a 32-bit integer, so we may have a single
+ // value on the FPU stack we need to get rid of.
+ __ ffree(0);
+ }
+
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
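Both truncation paths above implement the same test by different means: fisttp
truncates and raises a pending #IA on failure, while the fist/fild pair
round-trips the value and compares it with the original. In scalar terms (a
sketch, not the stub's actual helper; std::isnan stands in for the parity_even
NaN check):

    #include <cmath>
    #include <cstdint>

    static bool IsExactInt32(double value, int32_t* out) {
      if (std::isnan(value)) return false;  // FCmp reports unordered.
      if (value < -2147483648.0 || value > 2147483647.0) return false;
      // static_cast truncates where fist rounds, but any value that survives
      // the round-trip test below is exactly representable, so they agree.
      int32_t stored = static_cast<int32_t>(value);      // fist stores...
      if (static_cast<double>(stored) != value) {        // ...fild reloads...
        return false;                                    // ...FCmp mismatched.
      }
      *out = stored;
      return true;
    }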
@@ -8196,8 +7991,8 @@
// Both strings are non-empty.
// rax: first string
// rbx: length of first string
- // rcx: length of second string
- // rdx: second string
+ // rcx: length of second string
+ // rdx: second string
// r8: instance type of first string if string check was performed above
// r9: instance type of second string if string check was performed above
Label string_add_flat_result;
@@ -8353,11 +8148,11 @@
}
-void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
+void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -8378,174 +8173,6 @@
}
-void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of quadwords. Copy any remaining
- // characters one at a time after running rep movs.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
- // Nothing to do for zero characters.
- Label done;
- __ testq(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- ASSERT_EQ(2, sizeof(uc16)); // NOLINT
- __ addq(count, count);
- }
-
- // Don't enter the rep movs if there are fewer than 8 bytes to copy.
- Label last_bytes;
- __ testq(count, Immediate(~7));
- __ j(zero, &last_bytes);
-
- // Copy from rsi to rdi using the rep movs instruction.
- __ movq(kScratchRegister, count);
- __ sar(count, Immediate(3)); // Number of quadwords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movq(count, kScratchRegister);
- __ and_(count, Immediate(7));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testq(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(1));
- __ addq(dest, Immediate(1));
- __ subq(count, Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
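The copy strategy GenerateCopyCharactersREP removes, in plain terms: widen the
character count to a byte count, bulk-copy eight bytes at a time (the rep
movsq), then finish the trailing zero to seven bytes one by one. An equivalent
C++ sketch (memcpy stands in for rep movsq; names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static void CopyCharactersRep(uint8_t* dest, const uint8_t* src,
                                  size_t char_count, bool ascii) {
      size_t byte_count = ascii ? char_count : char_count * 2;  // uc16 = 2.
      size_t bulk = byte_count & ~size_t{7};  // Whole quadwords for rep movsq.
      std::memcpy(dest, src, bulk);           // Overlap is not supported.
      for (size_t i = bulk; i < byte_count; i++) {  // Trailing 0-7 bytes.
        dest[i] = src[i];
      }
    }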
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
-
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
-
- // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
- ASSERT_EQ(0, kSmiTag);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
- __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
- __ j(negative, &runtime);
- // Handle sub-strings of length 2 and less in the runtime system.
- __ SmiToInteger32(rcx, rcx);
- __ cmpl(rcx, Immediate(2));
- __ j(below_equal, &runtime);
-
- // rax: string
- // rbx: instance type
- // rcx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
- __ j(not_equal, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // rsi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(kArgumentsSize);
-
- __ bind(&non_ascii_flat);
- // rax: string
- // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
- // rcx: result string length
- // Check for sequential two byte string
- __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // rsi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(kArgumentsSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
-}
-
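Both SubStringStub paths above reduce to the same arithmetic: the result length
is to - from, and the source pointer is the string body offset by from scaled by
the character width (times_1 for ASCII, times_2 for two-byte). As flat-buffer
C++ (a sketch; V8 operates on tagged handles, not raw pointers):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // width is 1 for a flat ASCII string, 2 for a flat two-byte string.
    static void SubStringFlat(uint8_t* result_chars,
                              const uint8_t* string_chars,
                              int from, int to, int width) {
      int length = to - from;  // Lengths of 2 or less go to the runtime.
      std::memcpy(result_chars,
                  string_chars + static_cast<size_t>(from) * width,
                  static_cast<size_t>(length) * width);
    }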
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
@@ -8614,6 +8241,7 @@
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
Label result_greater;
@@ -8623,11 +8251,13 @@
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
__ Move(rax, Smi::FromInt(GREATER));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
}
@@ -8657,7 +8287,6 @@
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 72c8416..50bb023 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,70 +43,57 @@
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
+ enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen, Expression* expression);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
+ ASSERT(type_ == ILLEGAL);
type_ = value;
}
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
// The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
+ int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
+ // the expression stack, and it is left in place with its value above it.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
+ // on the expression stack. The stored value is left in place (with the
+ // reference intact below it) to support chained assignments.
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
- bool persist_after_get_;
};
@@ -435,11 +422,6 @@
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
- // Load a property of an object, returning it in a Result.
- // The object and the property name are passed on the stack, and
- // not changed.
- Result EmitKeyedLoad(bool is_global);
-
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -464,20 +446,20 @@
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- Result ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ void ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- Result LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ void LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
@@ -496,10 +478,10 @@
CallFunctionFlags flags,
int position);
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
+ // Use an optimized version of Function.prototype.apply that avoids
+ // allocating the arguments object and just copies the arguments
+ // from the stack.
+ void CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -532,7 +514,6 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -629,8 +610,8 @@
friend class JumpTarget;
friend class Reference;
friend class Result;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
+ friend class FastCodeGenerator;
+ friend class CodeGenSelector;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -731,29 +712,6 @@
};
-class StringStubBase: public CodeStub {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-};
-
-
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -761,7 +719,7 @@
};
-class StringAddStub: public StringStubBase {
+class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -773,23 +731,17 @@
void Generate(MacroAssembler* masm);
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
// Should the stub check whether arguments are strings?
bool string_check_;
};
-class SubStringStub: public StringStubBase {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class StringCompareStub: public CodeStub {
public:
explicit StringCompareStub() {}
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index ce3aae8..0b43e76 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -114,10 +114,6 @@
{ 0x9E, UNSET_OP_ORDER, "sahf" },
{ 0x99, UNSET_OP_ORDER, "cdq" },
{ 0x9B, UNSET_OP_ORDER, "fwait" },
- { 0xA4, UNSET_OP_ORDER, "movs" },
- { 0xA5, UNSET_OP_ORDER, "movs" },
- { 0xA6, UNSET_OP_ORDER, "cmps" },
- { 0xA7, UNSET_OP_ORDER, "cmps" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -161,16 +157,6 @@
};
-enum Prefixes {
- ESCAPE_PREFIX = 0x0F,
- OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
- ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
- REPNE_PREFIX = 0xF2,
- REP_PREFIX = 0xF3,
- REPEQ_PREFIX = REP_PREFIX
-};
-
-
struct InstructionDesc {
const char* mnem;
InstructionType type;
@@ -1142,12 +1128,12 @@
// Scan for prefixes.
while (true) {
current = *data;
- if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
+ if (current == 0x66) { // Group 3 prefix.
operand_size_ = current;
} else if ((current & 0xF0) == 0x40) { // REX prefix.
setRex(current);
if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
+ } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix.
group_1_prefix_ = current;
} else { // Not a prefix - an opcode.
break;
@@ -1159,17 +1145,7 @@
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- if (current >= 0xA4 && current <= 0xA7) {
- // String move or compare operations.
- if (group_1_prefix_ == REP_PREFIX) {
- // REP.
- AppendToBuffer("rep ");
- }
- if (rex_w()) AppendToBuffer("REX.W ");
- AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
- } else {
- AppendToBuffer("%s", idesc.mnem, operand_size_code());
- }
+ AppendToBuffer(idesc.mnem);
data++;
break;
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
similarity index 90%
rename from src/x64/full-codegen-x64.cc
rename to src/x64/fast-codegen-x64.cc
index a5085ca..0f28433 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "full-codegen.h"
+#include "fast-codegen.h"
#include "parser.h"
namespace v8 {
@@ -51,7 +51,7 @@
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
@@ -161,7 +161,7 @@
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FastCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -200,7 +200,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -243,7 +243,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -285,7 +285,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -324,7 +324,7 @@
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -364,7 +364,7 @@
}
-void FullCodeGenerator::DropAndApply(int count,
+void FastCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -415,7 +415,7 @@
}
-void FullCodeGenerator::Apply(Expression::Context context,
+void FastCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -480,7 +480,7 @@
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
+void FastCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
// desired), then the value is already duplicated on the stack.
@@ -614,7 +614,7 @@
}
-MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -633,13 +633,13 @@
}
-void FullCodeGenerator::Move(Register destination, Slot* source) {
+void FastCodeGenerator::Move(Register destination, Slot* source) {
MemOperand location = EmitSlotSearch(source, destination);
__ movq(destination, location);
}
-void FullCodeGenerator::Move(Slot* dst,
+void FastCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -655,7 +655,7 @@
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -754,7 +754,7 @@
}
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
__ Push(pairs);
@@ -764,7 +764,7 @@
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -782,21 +782,17 @@
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->slot();
- Property* property = var->AsProperty();
-
- if (var->is_global() && !var->is_this()) {
+ Expression* rewrite = var->rewrite();
+ if (rewrite == NULL) {
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
@@ -809,24 +805,34 @@
// is no test rax instruction here.
__ nop();
DropAndApply(1, context, rax);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- Apply(context, rax);
-
- } else if (slot != NULL) {
- Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
- ? "Context slot"
- : "Stack slot");
+ } else if (rewrite->AsSlot() != NULL) {
+ Slot* slot = rewrite->AsSlot();
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
Apply(context, slot);
-
} else {
- Comment cmnt(masm_, "Rewritten parameter");
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
+ Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -858,7 +864,7 @@
}
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -884,7 +890,7 @@
}
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -954,7 +960,7 @@
}
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
@@ -1004,7 +1010,7 @@
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
@@ -1014,7 +1020,7 @@
}
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1022,7 +1028,7 @@
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FastCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ push(result_register());
GenericBinaryOpStub stub(op,
@@ -1033,16 +1039,11 @@
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
- // Three main cases: non-this global variables, lookup slots, and
- // all other types of slots. Left-hand-side parameters that rewrite
- // to explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
- Slot* slot = var->slot();
if (var->is_global()) {
- ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
// rcx, and the global object on the stack.
@@ -1053,14 +1054,8 @@
// Overwrite the global object on the stack with the result if needed.
DropAndApply(1, context, rax);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- __ push(result_register()); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- Apply(context, rax);
-
} else if (var->slot() != NULL) {
+ Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
@@ -1083,7 +1078,6 @@
break;
}
Apply(context, result_register());
-
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
@@ -1092,7 +1086,7 @@
}
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1127,7 +1121,7 @@
}
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1163,7 +1157,7 @@
}
-void FullCodeGenerator::VisitProperty(Property* expr) {
+void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1183,7 +1177,7 @@
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1206,7 +1200,7 @@
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1224,7 +1218,7 @@
}
-void FullCodeGenerator::VisitCall(Call* expr) {
+void FastCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1298,7 +1292,7 @@
}
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1333,7 +1327,7 @@
}
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1366,7 +1360,7 @@
}
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1470,27 +1464,13 @@
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForValue(expr->expression(), kAccumulator);
- Label no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &no_conversion);
- __ push(result_register());
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ bind(&no_conversion);
- Apply(context_, result_register());
- break;
- }
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1509,7 +1489,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kAccumulator;
+ location_ = kStack;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1525,16 +1505,11 @@
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
+ __ push(rax);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &no_conversion);
- __ push(rax);
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1566,27 +1541,6 @@
}
}
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- if (loop_depth() > 0) {
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- }
- __ j(overflow, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &done);
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- }
- }
// Call stub for +1/-1.
__ push(rax);
__ Push(Smi::FromInt(1));
@@ -1594,7 +1548,6 @@
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
- __ bind(&done);
// Store the value returned in rax.
switch (assign_type) {
@@ -1648,7 +1601,7 @@
}
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1683,7 +1636,7 @@
}
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1795,25 +1748,25 @@
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, rax);
}
-Register FullCodeGenerator::result_register() { return rax; }
+Register FastCodeGenerator::result_register() { return rax; }
-Register FullCodeGenerator::context_register() { return rsi; }
+Register FastCodeGenerator::context_register() { return rsi; }
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
}
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
}
@@ -1822,7 +1775,7 @@
// Non-local control flow support.
-void FullCodeGenerator::EnterFinallyBlock() {
+void FastCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
@@ -1836,7 +1789,7 @@
}
-void FullCodeGenerator::ExitFinallyBlock() {
+void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore result register from stack.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index e293247..457ece5 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -271,10 +271,11 @@
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
__ j(below, &slow);
-
- // Check bit field.
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks. The map is already in rdx.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(kSlowCaseBitFieldMask));
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the key is a smi.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b06b8c8..65a408b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -581,20 +581,6 @@
}
-Condition MacroAssembler::CheckBothPositiveSmi(Register first,
- Register second) {
- if (first.is(second)) {
- return CheckPositiveSmi(first);
- }
- movl(kScratchRegister, first);
- orl(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(0x03));
- return zero;
-}
-
-
-
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
@@ -674,17 +660,7 @@
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- }
- Assert(no_overflow, "Smi subtraction overflow");
- } else if (dst.is(src1)) {
+ if (dst.is(src1)) {
subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
@@ -1316,14 +1292,6 @@
}
-void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
- Label* on_not_both_smi) {
- Condition both_smi = CheckBothPositiveSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
@@ -1343,7 +1311,8 @@
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringBits =
+ kNotStringTag | kSeqStringTag | kAsciiStringTag;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -1351,7 +1320,7 @@
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatAsciiStringBits + (kFlatAsciiStringBits << 3)));
j(not_equal, on_fail);
}
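The lea/cmpl pair above folds two masked comparisons into one: because the
ASSERT guarantees the mask and the mask shifted left by three share no bits,
a + (b << 3) decomposes uniquely, so the combined value matches only when both
operands match. The same invariant in C++ (illustrative values):

    #include <cassert>
    #include <cstdint>

    static bool BothFlatAscii(uint32_t type1, uint32_t type2,
                              uint32_t mask, uint32_t expected) {
      assert((mask & (mask << 3)) == 0);  // No overlap: decomposition unique.
      uint32_t combined = (type1 & mask) + ((type2 & mask) << 3);  // The lea.
      return combined == expected + (expected << 3);
    }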
@@ -1549,17 +1518,6 @@
}
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzxbq(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- ASSERT(kNotStringTag != 0);
- testb(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 8d4a8f2..ce2848c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -207,9 +207,6 @@
// Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
- // Are both values tagged smis.
- Condition CheckBothPositiveSmi(Register first, Register second);
-
// Are either value a tagged smi.
Condition CheckEitherSmi(Register first, Register second);
@@ -251,10 +248,6 @@
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
- // Jump if either or both register are not positive smi values.
- void JumpIfNotBothPositiveSmi(Register src1, Register src2,
- Label* on_not_both_smi);
-
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
@@ -459,15 +452,6 @@
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 6142ce3..75bbf3e 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -71,6 +71,8 @@
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -80,8 +82,6 @@
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
- * - At start of string (if 1, we are starting at the start of the
- * string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
* - register 1 rbp[-n-8] num_saved_registers_ registers)
* - ...
@@ -661,7 +661,7 @@
ASSERT_EQ(kInputStart, -3 * kPointerSize);
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
+ ASSERT_EQ(kAtStart, -6 * kPointerSize);
__ push(rdi);
__ push(rsi);
__ push(rdx);
@@ -672,7 +672,6 @@
__ push(rbx); // Callee-save
#endif
__ push(Immediate(0)); // Make room for "input start - 1" constant.
- __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -717,15 +716,6 @@
// Store this value in a local variable, for use when clearing
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
-
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ xor_(rcx, rcx); // setcc only operates on cl (lower byte of rcx).
- __ testq(rbx, rbx);
- __ setcc(zero, rcx); // 1 if 0 (start of string), 0 if positive.
- __ movq(Operand(rbp, kAtStart), rcx);
-
if (num_saved_registers_ > 0) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index c17f2b8..694cba0 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -138,7 +138,9 @@
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ // AtStart is passed as 32 bit int (values 0 or 1).
+ static const int kAtStart = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kAtStart + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
#else
@@ -150,8 +152,9 @@
static const int kInputStart = kStartIndex - kPointerSize;
static const int kInputEnd = kInputStart - kPointerSize;
static const int kRegisterOutput = kInputEnd - kPointerSize;
- static const int kStackHighEnd = kRegisterOutput - kPointerSize;
- static const int kDirectCall = kFrameAlign;
+ static const int kAtStart = kRegisterOutput - kPointerSize;
+ static const int kStackHighEnd = kFrameAlign;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
#endif
#ifdef _WIN64
@@ -165,7 +168,7 @@
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kStackHighEnd - kPointerSize;
+ static const int kBackup_rbx = kAtStart - kPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -173,10 +176,9 @@
// the frame in GetCode.
static const int kInputStartMinusOne =
kLastCalleeSaveRegister - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index a0fc3cb..015ba13 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -54,8 +54,8 @@
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
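With the at_start argument restored, the generated matcher is entered through
the macro above with eight arguments. A matching C-style signature, with
parameter names taken from the frame-layout comments (descriptive guesses, not
a V8 declaration):

    #include <cstdint>

    typedef int (*RegExpEntryPoint)(
        const void* input_string,    // p0: subject string
        int start_index,             // p1
        const uint8_t* input_start,  // p2
        const uint8_t* input_end,    // p3
        int* register_output,        // p4: int[num_saved_registers_]
        int at_start,                // p5: 1 when matching from string start
        uint8_t* stack_area_base,    // p6: high end of backtracking stack
        int direct_call);            // p7: passed as a 32 bit int, 0 or 1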
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)