Merge V8 at r7668: Initial merge by Git.
Change-Id: I1703c8b4f5c63052451a22cf3fb878abc9a0ec75
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index bd76d9a..3e19a45 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -223,9 +223,9 @@
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(this);
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -234,7 +234,7 @@
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index be34df9..fd8e8b5 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -32,7 +32,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -44,11 +44,12 @@
namespace v8 {
namespace internal {
-CpuFeatures::CpuFeatures()
- : supported_(0),
- enabled_(0),
- found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
#ifdef __arm__
static uint64_t CpuFeaturesImpliedByCompiler() {
@@ -58,48 +59,52 @@
#endif // def CAN_USE_ARMV7_INSTRUCTIONS
// If the compiler is allowed to use VFP then we can use VFP too in our code
// generation even when generating snapshots. This won't work for cross
- // compilation.
+ // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
- answer |= 1u << VFP3;
+ answer |= 1u << VFP3 | 1u << ARMv7;
#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
- answer |= 1u << VFP3;
+ answer |= 1u << VFP3 | 1u << ARMv7;
#endif // def CAN_USE_VFP_INSTRUCTIONS
return answer;
}
#endif // def __arm__
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
+ // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3;
+ supported_ |= 1u << VFP3 | 1u << ARMv7;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
#else // def __arm__
- if (portable && Serializer::enabled()) {
+ if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
return; // No features if we might serialize.
}
if (OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if
- // runtime detection of VFP returns true.
- supported_ |= 1u << VFP3;
- found_by_runtime_probing_ |= 1u << VFP3;
+ // This implementation also sets the VFP flags if runtime
+ // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
+ // 0406B, page A1-6.
+ supported_ |= 1u << VFP3 | 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
}
if (OS::ArmCpuHasFeature(ARMv7)) {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
-
- if (!portable) found_by_runtime_probing_ = 0;
#endif
}
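
As a sketch of the calling pattern this rework implies (the wrapper
function below is hypothetical; Probe(), IsSupported() and Scope are the
names introduced by this patch):

    // Hypothetical driver, for illustration only.
    void InitializeCodegenSupport() {
      CpuFeatures::Probe();  // Must run exactly once, before any query.
      if (CpuFeatures::IsSupported(VFP3)) {
        // In DEBUG builds, Scope records VFP3 as enabled on the current
        // isolate, so the CpuFeatures::IsEnabled(VFP3) asserts in the
        // emitters hold while VFP code is generated.
        CpuFeatures::Scope scope(VFP3);
        // ... emit VFP3 instructions ...
      }
    }
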
@@ -268,8 +273,8 @@
static const int kMinimalBufferSize = 4*KB;
-Assembler::Assembler(void* buffer, int buffer_size)
- : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
positions_recorder_(this),
allow_peephole_optimization_(false),
emit_debug_code_(FLAG_debug_code) {
@@ -715,7 +720,7 @@
*instr ^= kMovMvnFlip;
return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
- if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
if (imm32 < 0x10000) {
*instr ^= kMovwLeaveCCFlip;
*instr |= EncodeMovwImmediate(imm32);
@@ -779,7 +784,7 @@
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() ||
- !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ !CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
@@ -822,7 +827,7 @@
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() ||
- !isolate()->cpu_features()->IsSupported(ARMv7)) {
+ !CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
} else {
@@ -1265,7 +1270,7 @@
const Operand& src,
Condition cond) {
// v6 and above.
- ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.rm_.is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -1293,7 +1298,7 @@
int width,
Condition cond) {
// v7 and above.
- ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1313,7 +1318,7 @@
int width,
Condition cond) {
// v7 and above.
- ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1328,7 +1333,7 @@
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
// v7 and above.
- ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1347,7 +1352,7 @@
int width,
Condition cond) {
// v7 and above.
- ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1619,7 +1624,7 @@
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
+ ASSERT(CpuFeatures::IsEnabled(ARMv7));
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
@@ -1634,7 +1639,7 @@
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
+ ASSERT(CpuFeatures::IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
@@ -1821,45 +1826,6 @@
}
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, kSpecialCondition);
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
// Support for VFP.
void Assembler::vldr(const DwVfpRegister dst,
@@ -1870,7 +1836,7 @@
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1912,7 +1878,7 @@
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1956,7 +1922,7 @@
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1997,7 +1963,7 @@
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -2032,6 +1998,88 @@
}
+void Assembler::vldm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-626.
+ // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
+ // first(15-12) | 1010(11-8) | (count * 2)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
+ 0xB*B8 | count*2);
+}
+
+
+void Assembler::vstm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
+ // first(15-12) | 1011(11-8) | (count * 2)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
+ 0xB*B8 | count*2);
+}
+
+void Assembler::vldm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-626.
+ // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
+ // first(15-12) | 1010(11-8) | (count/2)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | count);
+}
+
+
+void Assembler::vstm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
+ // first(15-12) | 1011(11-8) | (count/2)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | count);
+}
+
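
A usage illustration (not part of this patch): the new overloads pair
naturally for saving and restoring a contiguous run of VFP registers.
db_w decrements the base before each transfer and writes it back, ia_w
increments it after, so with sp as the base they behave as push and pop
(__ is the usual macro-assembler shorthand):

    __ vstm(db_w, sp, d0, d7);  // Push d0..d7 onto the stack.
    // ... code that may clobber d0..d7 ...
    __ vldm(ia_w, sp, d0, d7);  // Pop d0..d7 back off the stack.
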
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -2043,7 +2091,7 @@
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
// VMOV can accept an immediate of the form:
//
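
For a concrete sense of which constants FitsVMOVDoubleImmediate accepts
-- assuming the standard VFPv3 immediate form +/- m * 2^-n with
16 <= m <= 31 and 0 <= n <= 7 -- a hypothetical check:

    uint32_t enc;
    bool fits = FitsVMOVDoubleImmediate(1.0, &enc);    // true: 16 * 2^-4
    bool no_fit = FitsVMOVDoubleImmediate(0.1, &enc);  // false: not representable
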
@@ -2096,7 +2144,7 @@
const Condition cond) {
// Dd = immediate
// Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
uint32_t enc;
if (FitsVMOVDoubleImmediate(imm, &enc)) {
@@ -2133,7 +2181,7 @@
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@@ -2146,7 +2194,7 @@
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
@@ -2160,7 +2208,7 @@
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@@ -2175,7 +2223,7 @@
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@@ -2189,7 +2237,7 @@
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@@ -2204,7 +2252,7 @@
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@@ -2329,7 +2377,7 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -2338,7 +2386,7 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@@ -2347,7 +2395,7 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -2356,7 +2404,7 @@
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2365,7 +2413,7 @@
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2374,7 +2422,7 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -2383,7 +2431,7 @@
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2413,7 +2461,7 @@
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2428,7 +2476,7 @@
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@@ -2443,7 +2491,7 @@
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2458,7 +2506,7 @@
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2471,7 +2519,7 @@
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@@ -2484,7 +2532,7 @@
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
src1.code()*B12 | 0x5*B9 | B8 | B6);
@@ -2495,7 +2543,7 @@
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2505,7 +2553,7 @@
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2516,7 +2564,7 @@
const Condition cond) {
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 91e6244..9050c2c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -32,7 +32,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -468,58 +468,97 @@
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- void Probe(bool portable);
+ static void Probe();
// Check whether a feature is supported by the target CPU.
- bool IsSupported(CpuFeature f) const {
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
return (supported_ & (1u << f)) != 0;
}
+#ifdef DEBUG
// Check whether a feature is currently enabled.
- bool IsEnabled(CpuFeature f) const {
- return (enabled_ & (1u << f)) != 0;
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
}
+#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(CpuFeature f)
- : cpu_features_(Isolate::Current()->cpu_features()),
- isolate_(Isolate::Current()) {
- ASSERT(cpu_features_->IsSupported(f));
+ explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
+ ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
- (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = cpu_features_->enabled_;
- cpu_features_->enabled_ |= 1u << f;
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
}
~Scope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- cpu_features_->enabled_ = old_enabled_;
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
}
private:
- unsigned old_enabled_;
- CpuFeatures* cpu_features_;
Isolate* isolate_;
+ unsigned old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
private:
- CpuFeatures();
-
- unsigned supported_;
- unsigned enabled_;
- unsigned found_by_runtime_probing_;
-
- friend class Isolate;
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
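
The Generate_OnStackReplacement change later in this patch shows the
intended TryForceFeatureScope pattern; in sketch form (surrounding code
hypothetical):

    // Only effective when the serializer guarantees a single isolate.
    CpuFeatures::TryForceFeatureScope scope(VFP3);
    if (!CpuFeatures::IsSupported(VFP3)) {
      return;  // Forcing failed and the CPU lacks VFP3: bail out.
    }
    // ... generate VFP3-dependent code; supported_ reverts on scope exit.
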
@@ -564,7 +603,7 @@
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.
@@ -908,16 +947,6 @@
void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
- void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short, Condition cond = al);
- void stc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short); // v5 and above
- void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D15.
// Currently these APIs do not support extended D registers, i.e, D16 to D31.
@@ -956,6 +985,30 @@
const MemOperand& dst,
const Condition cond = al);
+ void vldm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond = al);
+
+ void vstm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond = al);
+
+ void vldm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond = al);
+
+ void vstm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond = al);
+
void vmov(const DwVfpRegister dst,
double imm,
const Condition cond = al);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index f401cfd..5235dd3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -1173,9 +1173,11 @@
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Probe the CPU to set the supported features, because this builtin
- // may be called before the initialization performs CPU setup.
- masm->isolate()->cpu_features()->Probe(false);
+ CpuFeatures::TryForceFeatureScope scope(VFP3);
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
+ return;
+ }
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 441adfe..d66daea 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -308,13 +308,9 @@
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
Register exponent = result1_;
Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
+
Label not_special;
// Convert from Smi to integer.
__ mov(source_, Operand(source_, ASR, kSmiTagSize));
@@ -502,7 +498,7 @@
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
__ vmov(d7.high(), scratch1);
@@ -521,7 +517,7 @@
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r9 is scratch.
+ // Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
@@ -570,7 +566,7 @@
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ if (CpuFeatures::IsSupported(VFP3) &&
destination == kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber to double register.
@@ -585,7 +581,7 @@
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
__ SmiUntag(scratch1, object);
@@ -676,7 +672,7 @@
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(single_scratch, scratch1);
__ vcvt_f64_s32(double_dst, single_scratch);
@@ -686,51 +682,51 @@
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst1 | dst2 |
+ // | dst2 | dst1 |
// | s | exp | mantissa |
// Check for zero.
__ cmp(scratch1, Operand(0));
- __ mov(dst1, scratch1);
__ mov(dst2, scratch1);
+ __ mov(dst1, scratch1);
__ b(eq, &done);
// Preload the sign of the value.
- __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst2, scratch1, Operand(HeapNumber::kSignMask), SetCC);
// Get the absolute value of the object (as an unsigned integer).
__ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ CountLeadingZeros(dst2, scratch1, scratch2);
- __ rsb(dst2, dst2, Operand(31));
+ __ CountLeadingZeros(dst1, scratch1, scratch2);
+ __ rsb(dst1, dst1, Operand(31));
// Set the exponent.
- __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst1, scratch2, scratch2,
+ __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst2, scratch2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ mov(scratch2, Operand(1));
- __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
+ __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst1));
- __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
// Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ b(mi, &fewer_than_20_useful_bits);
// Set the higher 20 bits of the mantissa.
- __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
+ __ orr(dst2, dst2, Operand(scratch1, LSR, scratch2));
__ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst2, Operand(scratch1, LSL, scratch2));
+ __ mov(dst1, Operand(scratch1, LSL, scratch2));
__ b(&done);
__ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
__ mov(scratch2, Operand(scratch1, LSL, scratch2));
- __ orr(dst1, dst1, scratch2);
- // Set dst2 to 0.
- __ mov(dst2, Operand(0));
+ __ orr(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, Operand(0));
}
__ b(&done);
@@ -744,7 +740,7 @@
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -818,7 +814,7 @@
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
SwVfpRegister single_scratch = double_scratch.low();
// Load the double value.
@@ -951,18 +947,10 @@
// Call C routine that may not cause GC or other trouble.
__ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
4);
- // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to subtract the tag from heap_number_result.
- __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
-#else
- // Double returned in registers 0 and 1.
+ // Store answer in the overwritable heap number. Double returned in
+ // registers r0 and r1.
__ Strd(r0, r1, FieldMemOperand(heap_number_result,
HeapNumber::kValueOffset));
-#endif
// Place heap_number_result in r0 and return to the pushed return address.
__ mov(r0, Operand(heap_number_result));
__ pop(pc);
@@ -1153,7 +1141,7 @@
}
// Lhs is a smi, rhs is a number.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
// Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
__ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
@@ -1193,7 +1181,7 @@
}
// Rhs is a smi, lhs is a heap number.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load the double from lhs, tagged HeapNumber r1, to d7.
__ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1373,7 +1361,7 @@
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
@@ -1463,7 +1451,7 @@
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (isolate->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
scratch1,
@@ -1597,7 +1585,7 @@
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
- if (isolate->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
Label no_nan;
@@ -1707,7 +1695,7 @@
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
- ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
Label false_result;
Label not_heap_number;
@@ -1780,1064 +1768,6 @@
}
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in r0 and r1. In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
- MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
- Label slow, slow_reverse, do_the_call;
- bool use_fp_registers =
- Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
- Token::MOD != op_;
-
- ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
- Register heap_number_map = r6;
-
- if (ShouldGenerateSmiCode()) {
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Smi-smi case (overflow).
- // Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r3 and r7 are scratch.
- __ AllocateHeapNumber(
- r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
- // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
- // using registers d7 and d6 for the double values.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r9);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r9);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- __ jmp(&do_the_call); // Tail call. No return.
- }
-
- // We branch here if at least one of r0 and r1 is not a Smi.
- __ bind(not_smi);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // After this point we have the left hand side in r1 and the right hand side
- // in r0.
- if (lhs.is(r0)) {
- __ Swap(r0, r1, ip);
- }
-
- // The type transition also calculates the answer.
- bool generate_code_to_calculate_answer = true;
-
- if (ShouldGenerateFPCode()) {
- // DIV has neither SmiSmi fast code nor specialized slow code.
- // So don't try to patch a DIV Stub.
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- GenerateTypeTransition(masm); // Tail call.
- generate_code_to_calculate_answer = false;
- break;
-
- case Token::DIV:
- // DIV has neither SmiSmi fast code nor specialized slow code.
- // So don't try to patch a DIV Stub.
- break;
-
- default:
- break;
- }
- }
-
- if (generate_code_to_calculate_answer) {
- Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
- if (mode_ == NO_OVERWRITE) {
- // In the case where there is no chance of an overwritable float we may
- // as well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
- }
-
- // Move r0 to a double in r2-r3.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(r5, Operand(r0)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r0);
- __ bind(&r0_is_smi);
- if (mode_ == OVERWRITE_RIGHT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- }
- } else {
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r4);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
- // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
- Label r1_is_not_smi;
- if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
- HasSmiSmiFastPath()) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &r1_is_not_smi);
- GenerateTypeTransition(masm); // Tail call.
- }
-
- __ bind(&finished_loading_r0);
-
- // Move r1 to a double in r0-r1.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
- __ bind(&r1_is_not_smi);
- __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(r5, Operand(r1)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r1);
- __ bind(&r1_is_smi);
- if (mode_ == OVERWRITE_LEFT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r9);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- __ bind(&finished_loading_r1);
- }
-
- if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
- __ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
-
- if (Token::MUL == op_) {
- __ vmul(d5, d6, d7);
- } else if (Token::DIV == op_) {
- __ vdiv(d5, d6, d7);
- } else if (Token::ADD == op_) {
- __ vadd(d5, d6, d7);
- } else if (Token::SUB == op_) {
- __ vsub(d5, d6, d7);
- } else {
- UNREACHABLE();
- }
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(
- ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to subtract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
- __ mov(r0, Operand(r5));
- // And we are done.
- __ pop(pc);
- }
- }
- }
-
- if (!generate_code_to_calculate_answer &&
- !slow_reverse.is_linked() &&
- !slow.is_linked()) {
- return;
- }
-
- if (lhs.is(r0)) {
- __ b(&slow);
- __ bind(&slow_reverse);
- __ Swap(r0, r1, ip);
- }
-
- heap_number_map = no_reg; // Don't use this any more from here on.
-
- // We jump to here if something goes wrong (one param is not a number of any
- // sort or new-space allocation fails).
- __ bind(&slow);
-
- // Push arguments to the stack
- __ Push(r1, r0);
-
- if (Token::ADD == op_) {
- // Test for string arguments before calling runtime.
- // r1 : first argument
- // r0 : second argument
- // sp[0] : second argument
- // sp[4] : first argument
-
- Label not_strings, not_string1, string1, string1_smi2;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &not_string1);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string1);
-
- // First argument is a string, test second.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &string1_smi2);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, r0, r2, r4, r5, r6, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ str(r2, MemOperand(sp, 0));
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &not_strings);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
- __ bind(&not_strings);
- }
-
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
-}
-
-
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs. On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label slow, result_not_a_smi;
- Label rhs_is_smi, lhs_is_smi;
- Label done_checking_rhs, done_checking_lhs;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
- __ jmp(&done_checking_lhs);
- __ bind(&lhs_is_smi);
- __ mov(r3, Operand(lhs, ASR, 1));
- __ bind(&done_checking_lhs);
-
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
- __ jmp(&done_checking_rhs);
- __ bind(&rhs_is_smi);
- __ mov(r2, Operand(rhs, ASR, 1));
- __ bind(&done_checking_rhs);
-
- ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
- // r0 and r1: Original operands (Smi or heap numbers).
- // r2 and r3: Signed int32 operands.
- switch (op_) {
- case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
- case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
- case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of writing
- // the register as an unsigned int so we go to slow case if we hit this
- // case.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, &slow);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default: UNREACHABLE();
- }
- // check that the *signed* result fits in a smi
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- __ Ret();
-
- Label have_to_allocate, got_a_heap_number;
- __ bind(&result_not_a_smi);
- switch (mode_) {
- case OVERWRITE_RIGHT: {
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(rhs));
- break;
- }
- case OVERWRITE_LEFT: {
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(lhs));
- break;
- }
- case NO_OVERWRITE: {
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
- default: break;
- }
- __ bind(&got_a_heap_number);
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- if (mode_ != NO_OVERWRITE) {
- __ bind(&have_to_allocate);
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- __ jmp(&got_a_heap_number);
- }
-
- // If all else failed then we go to the runtime system.
- __ bind(&slow);
- __ Push(lhs, rhs); // Restore stack.
- switch (op_) {
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-
-
-// This function takes the known int in a register for the cases
-// where it doesn't know a good trick, and may deliver
-// a result that needs shifting.
-static void MultiplyByKnownIntInStub(
- MacroAssembler* masm,
- Register result,
- Register source,
- Register known_int_register, // Smi tagged.
- int known_int,
- int* required_shift) { // Including Smi tag shift
- switch (known_int) {
- case 3:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 1;
- break;
- case 5:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 1;
- break;
- case 6:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 2;
- break;
- case 7:
- __ rsb(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 9:
- __ add(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 10:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 2;
- break;
- default:
- ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
- __ mul(result, source, known_int_register);
- *required_shift = 0;
- }
-}
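
A worked example of the shift-add decomposition above: for known_int ==
10 the stub emits result = source + (source << 2), i.e. 5 * source, and
sets *required_shift = 2; two left shifts turn that into 20 * source,
which is exactly the Smi-tagged value of 10 * source (tagging doubles
the value). The known_int == 3 case likewise yields 3 * source with a
shift of 1, the Smi tag shift alone.
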
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits base (mask + 1) repeatedly until we have a
-// number from 0 to mask. On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
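
For instance, in the mod-3 case (mask 3, shift 2) an input of 25 reduces
as 25 -> (25 & 3) + (25 >> 2) = 7 -> 3 + 1 = 4 -> 0 + 1 = 1, and indeed
25 % 3 == 1. When the loop instead exits with lhs equal to the mask
itself, the 'eq' flags let the caller fold that value back to zero (see
the conditional subtractions in IntegerModStub::Generate below).
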
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ bic(scratch, lhs, Operand(mask));
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift1));
- __ add(lhs, lhs, Operand(scratch, LSR, shift2));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-// Splits the number into two halves (bottom half has shift bits). The top
-// half is subtracted from the bottom half. If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs) {
- int mask = (1 << shift) - 1;
- __ and_(ip, lhs, Operand(mask));
- __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
- __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator) {
- int limit = denominator;
- while (limit * 2 <= max) limit *= 2;
- while (limit >= denominator) {
- __ cmp(lhs, Operand(limit));
- __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
- limit >>= 1;
- }
-}
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits) {
- __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
- __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
- __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
- __ bic(odd_number_, odd_number_, Operand(1));
- __ mov(odd_number_, Operand(odd_number_, LSL, 1));
- // We now have (odd_number_ - 1) * 2 in the register.
- // Build a switch out of branches instead of data because it avoids
- // having to teach the assembler about intra-code-object pointers
- // that are not in relative branch instructions.
- Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
- Label mod21, mod23, mod25;
- { Assembler::BlockConstPoolScope block_const_pool(masm);
- __ add(pc, pc, Operand(odd_number_));
- // When you read pc it is always 8 ahead, but when you write it you always
- // write the actual value. So we put in two nops to take up the slack.
- __ nop();
- __ nop();
- __ b(&mod3);
- __ b(&mod5);
- __ b(&mod7);
- __ b(&mod9);
- __ b(&mod11);
- __ b(&mod13);
- __ b(&mod15);
- __ b(&mod17);
- __ b(&mod19);
- __ b(&mod21);
- __ b(&mod23);
- __ b(&mod25);
- }
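
To make the pc arithmetic concrete: with odd_number_ == 3 the register
holds (3 - 1) * 2 = 4 after the mask-and-shift above; the add reads pc
as its own address plus 8, which the two nops place at the slot just
before b(&mod3), so adding 4 lands exactly on b(&mod3). Each larger odd
denominator adds one more 4-byte branch slot.
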
-
- // For each denominator we find a multiple that is almost only ones
- // when expressed in binary. Then we do the sum-of-digits trick for
- // that number. If the multiple is not 1 then we have to do a little
- // more work afterwards to get the answer into the 0-denominator-1
- // range.
- DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
- __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
- ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
- __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
- ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
- ModReduce(masm, lhs_, 0x3f, 11);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
- ModReduce(masm, lhs_, 0xff, 13);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
- __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
- ModReduce(masm, lhs_, 0xff, 19);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
- ModReduce(masm, lhs_, 0x3f, 21);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
- ModReduce(masm, lhs_, 0xff, 23);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
- ModReduce(masm, lhs_, 0x7f, 25);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- // lhs_ : x
- // rhs_ : y
- // r0 : result
-
- Register result = r0;
- Register lhs = lhs_;
- Register rhs = rhs_;
-
- // This code can't cope with other register allocations yet.
- ASSERT(result.is(r0) &&
- ((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0))));
-
- Register smi_test_reg = r7;
- Register scratch = r9;
-
- // All ops need to know whether we are dealing with two Smis. Set up
- // smi_test_reg to tell us that.
- if (ShouldGenerateSmiCode()) {
- __ orr(smi_test_reg, lhs, Operand(rhs));
- }
-
- switch (op_) {
- case Token::ADD: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
- break;
- }
-
- case Token::SUB: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- if (lhs.is(r1)) {
- __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
- } else {
- __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
- }
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
- break;
- }
-
- case Token::MUL: {
- Label not_smi, slow;
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ b(ne, &not_smi);
- // Remove tag from one operand (but keep sign), so that result is Smi.
- __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
- // Do multiplication
- // scratch = lower 32 bits of ip * lhs.
- __ smull(scratch, scratch2, lhs, ip);
- // Go slow on overflows (smull does not set the overflow flag).
- __ mov(ip, Operand(scratch, ASR, 31));
- // No overflow if higher 33 bits are identical.
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &slow);
- // Go slow on zero result to handle -0.
- __ tst(scratch, Operand(scratch));
- __ mov(result, Operand(scratch), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we multiplied a negative number by 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, rhs, Operand(lhs), SetCC);
- __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
- // Slow case. We fall through here if we multiplied a negative number
- // by 0, because that would mean we should produce -0.
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
- break;
- }
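The comparison after smull is the usual 64-bit overflow test: the product fits in 32 signed bits exactly when the high word equals the sign extension of the low word, i.e. all upper 33 bits agree. A sketch, assuming arithmetic right shift on signed values (which the emitted ASR guarantees):

#include <cassert>
#include <cstdint>

bool ProductFitsInInt32(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * b;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  return hi == (lo >> 31);  // mov ip, lo ASR 31; cmp ip, hi.
}

int main() {
  assert(ProductFitsInInt32(46340, 46340));   // 2147395600 fits.
  assert(!ProductFitsInInt32(46341, 46341));  // 2147488281 does not.
  assert(ProductFitsInInt32(-65536, 32768));  // Exactly INT32_MIN fits.
}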
-
- case Token::DIV:
- case Token::MOD: {
- Label not_smi;
- if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
- Label lhs_is_unsuitable;
- __ JumpIfNotSmi(lhs, &not_smi);
- if (IsPowerOf2(constant_rhs_)) {
- if (op_ == Token::MOD) {
- __ and_(rhs,
- lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
- SetCC);
- // We now have the answer, but if the input was negative we also
- // have the sign bit. Our work is done if the result is
- // positive or zero:
- if (!rhs.is(r0)) {
- __ mov(r0, rhs, LeaveCC, pl);
- }
- __ Ret(pl);
- // A mod of a negative left hand side must return a negative number.
- // Unfortunately if the answer is 0 then we must return -0. And we
- // already optimistically trashed rhs so we may need to restore it.
- __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
- // Next two instructions are conditional on the answer being -0.
- __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
- __ b(eq, &lhs_is_unsuitable);
- // We need to subtract the divisor. E.g. -3 % 4 == -3.
- __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
- } else {
- ASSERT(op_ == Token::DIV);
- __ tst(lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
- __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
- int shift = 0;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- d >>= 1;
- shift++;
- }
- __ mov(r0, Operand(lhs, LSR, shift));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- } else {
- // Not a power of 2.
- __ tst(lhs, Operand(0x80000000u));
- __ b(ne, &lhs_is_unsuitable);
- // Find a fixed point reciprocal of the divisor so we can divide by
- // multiplying.
- double divisor = 1.0 / constant_rhs_;
- int shift = 32;
- double scale = 4294967296.0; // 1 << 32.
- uint32_t mul;
- // Maximise the precision of the fixed point reciprocal.
- while (true) {
- mul = static_cast<uint32_t>(scale * divisor);
- if (mul >= 0x7fffffff) break;
- scale *= 2.0;
- shift++;
- }
- mul++;
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ mov(scratch2, Operand(mul));
- __ umull(scratch, scratch2, scratch2, lhs);
- __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
- // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
- // rhs is still the known rhs. rhs is Smi tagged.
- // lhs is still the unknown lhs. lhs is Smi tagged.
- int required_scratch_shift = 0; // Including the Smi tag shift of 1.
- // scratch = scratch2 * rhs.
- MultiplyByKnownIntInStub(masm,
- scratch,
- scratch2,
- rhs,
- constant_rhs_,
- &required_scratch_shift);
- // scratch << required_scratch_shift is now the Smi tagged rhs *
- // (lhs / rhs) where / indicates integer division.
- if (op_ == Token::DIV) {
- __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
- __ b(ne, &lhs_is_unsuitable); // There was a remainder.
- __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
- } else {
- ASSERT(op_ == Token::MOD);
- __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
- }
- }
- __ Ret();
- __ bind(&lhs_is_unsuitable);
- } else if (op_ == Token::MOD &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS) {
- // Do generate a bit of smi code for modulus even though the default for
- // modulus is not to do it; since the ARM processor has no coprocessor
- // support for modulus, checking for smis makes sense. We can handle
- // 1 to 25 times any power of 2. This covers over half the numbers from
- // 1 to 100 including all of the first 25. (Actually the constants < 10
- // are handled above by reciprocal multiplication. We only get here for
- // those cases if the right hand side is not a constant or for cases
- // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
- // stub.)
- Label slow;
- Label not_power_of_2;
- ASSERT(!ShouldGenerateSmiCode());
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- // Check for two positive smis.
- __ orr(smi_test_reg, lhs, Operand(rhs));
- __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &slow);
- // Check that rhs is a power of two and not zero.
- Register mask_bits = r3;
- __ sub(scratch, rhs, Operand(1), SetCC);
- __ b(mi, &slow);
- __ and_(mask_bits, rhs, Operand(scratch), SetCC);
- __ b(ne, &not_power_of_2);
- // Calculate power of two modulus.
- __ and_(result, lhs, Operand(scratch));
- __ Ret();
-
- __ bind(&not_power_of_2);
- __ eor(scratch, scratch, Operand(mask_bits));
- // At least two bits are set in the modulus. The high one(s) are in
- // mask_bits and the low one is scratch + 1.
- __ and_(mask_bits, scratch, Operand(lhs));
- Register shift_distance = scratch;
- scratch = no_reg;
-
- // The rhs consists of a power of 2 multiplied by some odd number.
- // The power-of-2 part we handle by putting the corresponding bits
- // from the lhs in the mask_bits register, and the power in the
- // shift_distance register. Shift distance is never 0 due to Smi
- // tagging.
- __ CountLeadingZeros(r4, shift_distance, shift_distance);
- __ rsb(shift_distance, r4, Operand(32));
-
- // Now we need to find out what the odd number is. The last bit is
- // always 1.
- Register odd_number = r4;
- __ mov(odd_number, Operand(rhs, LSR, shift_distance));
- __ cmp(odd_number, Operand(25));
- __ b(gt, &slow);
-
- IntegerModStub stub(
- result, shift_distance, odd_number, mask_bits, lhs, r5);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
-
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(
- masm,
- &not_smi,
- lhs,
- rhs,
- op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
- break;
- }
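The selection loop above picks mul as roughly 2^shift / constant_rhs_ with at least 31 significant bits; the Smi tag on lhs contributes one extra factor of two, which is why the generated code shifts the high product word by shift - 31 rather than shift - 32. On untagged values the identity relied on is x / d == (x * mul) >> shift, which a standalone sketch can confirm for small positive inputs:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t d = 3; d <= 10; d++) {
    double divisor = 1.0 / d;
    int shift = 32;
    double scale = 4294967296.0;  // 1 << 32.
    uint32_t mul;
    while (true) {                // The same precision-maximising loop.
      mul = static_cast<uint32_t>(scale * divisor);
      if (mul >= 0x7fffffff) break;
      scale *= 2.0;
      shift++;
    }
    mul++;
    for (uint32_t x = 0; x < 500000; x++) {
      assert((static_cast<uint64_t>(x) * mul) >> shift == x / d);
    }
  }
}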
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label slow;
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &slow);
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- switch (op_) {
- case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
- case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
- case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(result, Operand(lhs, ASR, scratch2));
- // Smi tag result.
- __ bic(result, result, Operand(kSmiTagMask));
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch, Operand(0xc0000000));
- __ b(ne, &slow);
- // Smi tag result.
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- __ b(mi, &slow);
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- default: UNREACHABLE();
- }
- __ Ret();
- __ bind(&slow);
- HandleNonSmiBitwiseOp(masm, lhs, rhs);
- break;
- }
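Both shift paths end with a range check before re-tagging: SHR must not produce a value with either of the top two bits set (it must stay non-negative both before and after the tag shift), and SHL must land in the Smi range [-2^30, 2^30). The two predicates as a sketch, using unsigned arithmetic to avoid the signed-overflow UB that the emitted add does not have to care about:

#include <cstdint>

bool ShrResultIsSmi(uint32_t v) {
  return (v & 0xc0000000u) == 0;  // The tst with 0xc0000000 above.
}

bool ShlResultIsSmi(int32_t v) {
  // "add scratch2, v, #0x40000000" stays non-negative (no mi branch)
  // exactly when v lies in [-2^30, 2^30).
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}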
-
- default: UNREACHABLE();
- }
- // This code should be unreachable.
- __ stop("Unreachable");
-
- // Generate an unreachable reference to the DEFAULT stub so that it can be
- // found at the end of this stub when clearing ICs at GC.
- // TODO(kaznacheev): Check performance impact and get rid of this.
- if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
- GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
- __ CallStub(&uninit);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
@@ -2887,6 +1817,9 @@
case TRBinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
+ case TRBinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
case TRBinaryOpIC::STRING:
GenerateStringStub(masm);
break;
@@ -3077,7 +2010,7 @@
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
- Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ CpuFeatures::IsSupported(VFP3) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@@ -3132,6 +2065,9 @@
op_,
result,
scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
}
break;
}
@@ -3190,7 +2126,7 @@
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
@@ -3229,7 +2165,7 @@
// result.
__ mov(r0, Operand(r5));
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP3);
@@ -3261,6 +2197,7 @@
// requested the code falls through. If number allocation is requested but a
// heap number cannot be allocated the code jumps to the label gc_required.
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
@@ -3282,7 +2219,7 @@
// If heap number results are possible generate the result in an allocated
// heap number.
if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, NULL, gc_required);
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
}
__ bind(&not_smis);
}
@@ -3294,11 +2231,14 @@
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -3320,6 +2260,36 @@
}
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
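So, for example, an ADD site that has only ever combined two strings ('a' + 'b') keeps tail-calling the string add stub from here, while the first non-string operand ('a' + 1) falls through to GenerateTypeTransition and moves the IC to a more general state.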
+
+
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@@ -3358,7 +2328,7 @@
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
FloatingPointHelper::Destination destination =
- Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+ CpuFeatures::IsSupported(VFP3) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@@ -3485,6 +2455,9 @@
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(
masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
}
break;
@@ -3545,7 +2518,7 @@
// to return a heap number if we can.
// The non vfp3 code does not support this special case, so jump to
// runtime if we don't support it.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi,
(result_type_ <= TRBinaryOpIC::INT32) ? &transition
: &return_heap_number);
@@ -3571,16 +2544,16 @@
__ Ret();
__ bind(&return_heap_number);
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
if (op_ != Token::SHR) {
// Convert the result to a floating point value.
__ vmov(double_scratch.low(), r2);
@@ -3599,6 +2572,7 @@
} else {
// Tail call that writes the int32 in r2 to the heap number in r0, using
// r3 as scratch. r0 is preserved and returned.
+ __ mov(r0, r5);
WriteInt32ToHeapNumberStub stub(r2, r0, r3);
__ TailCallStub(&stub);
}
@@ -3665,7 +2639,7 @@
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
@@ -3806,7 +2780,7 @@
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (tagged) {
// Argument is a number and is on stack and in r0.
@@ -3894,7 +2868,7 @@
__ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
}
__ Ret();
- } // if (Isolate::Current()->cpu_features()->IsSupported(VFP3))
+ } // if (CpuFeatures::IsSupported(VFP3))
__ bind(&calculate);
if (tagged) {
@@ -3903,7 +2877,7 @@
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
- if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE();
+ if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
CpuFeatures::Scope scope(VFP3);
Label no_update;
@@ -4102,7 +3076,7 @@
__ mov(r0, Operand(r2));
}
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
@@ -4143,7 +3117,7 @@
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label base_not_smi;
@@ -4737,7 +3711,7 @@
__ b(ne, &slow);
// Null is not instance of anything.
- __ cmp(scratch, Operand(FACTORY->null_value()));
+ __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
__ b(ne, &object_not_null);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -5235,7 +4209,7 @@
__ bind(&failure);
// For failure and exception return null.
- __ mov(r0, Operand(FACTORY->null_value()));
+ __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -5306,6 +4280,8 @@
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
+ Factory* factory = masm->isolate()->factory();
+
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
@@ -5340,7 +4316,7 @@
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(FACTORY->empty_fixed_array()));
+ __ mov(r4, Operand(factory->empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -5361,13 +4337,13 @@
// r5: Number of elements in array, untagged.
// Set map.
- __ mov(r2, Operand(FACTORY->fixed_array_map()));
+ __ mov(r2, Operand(factory->fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(FACTORY->the_hole_value()));
+ __ mov(r2, Operand(factory->the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Fill fixed array elements with hole.
// r0: JSArray, tagged.
@@ -6807,7 +5783,7 @@
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load left and right operand
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 1dde255..0bb0025 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -71,162 +71,6 @@
};
-class GenericBinaryOpStub : public CodeStub {
- public:
- static const int kUnknownIntValue = -1;
-
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = kUnknownIntValue)
- : op_(op),
- mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) { }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
-
- // Minor key encoding in 18 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 3> {};
- class RegisterBits: public BitField<bool, 11, 1> {};
- class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(r0));
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
-
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
- }
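For reference, a round trip of this encoding: constants 0 through 10 map directly to keys 1 through 11, so MinorKeyForKnownInt(5) == 6 and KnownBitsForMinorKey(6) == 5; larger powers of two map to key 12 plus the exponent, so 64 == 2^6 encodes as key 18, and decoding key 18 doubles d six times back to 64.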
-
- Register LhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r0 : r1;
- }
-
- Register RhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r1 : r0;
- }
-
- bool HasSmiSmiFastPath() {
- return op_ != Token::DIV;
- }
-
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- const char* GetName();
-
- virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -235,7 +79,7 @@
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
- use_vfp3_ = Isolate::Current()->cpu_features()->IsSupported(VFP3);
+ use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -303,6 +147,7 @@
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
@@ -313,6 +158,7 @@
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
@@ -413,102 +259,6 @@
};
-// This stub can do a fast mod operation without using fp.
-// It is tail called from the GenericBinaryOpStub and it always
-// returns an answer. It never causes GC so it doesn't need a real frame.
-//
-// The inputs are always positive Smis. This is never called
-// where the denominator is a power of 2. We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register. It is always in the range
-// of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-// shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-// register.
-class IntegerModStub : public CodeStub {
- public:
- IntegerModStub(Register result,
- Register shift_distance,
- Register odd_number,
- Register mask_bits,
- Register lhs,
- Register scratch)
- : result_(result),
- shift_distance_(shift_distance),
- odd_number_(odd_number),
- mask_bits_(mask_bits),
- lhs_(lhs),
- scratch_(scratch) {
- // We don't code these in the minor key, so they should always be the same.
- // We don't really want to fix that since this stub is rather large and we
- // don't want many copies of it.
- ASSERT(shift_distance_.is(r9));
- ASSERT(odd_number_.is(r4));
- ASSERT(mask_bits_.is(r3));
- ASSERT(scratch_.is(r5));
- }
-
- private:
- Register result_;
- Register shift_distance_;
- Register odd_number_;
- Register mask_bits_;
- Register lhs_;
- Register scratch_;
-
- // Minor key encoding in 8 bits.
- class ResultRegisterBits: public BitField<int, 0, 4> {};
- class LhsRegisterBits: public BitField<int, 4, 4> {};
-
- Major MajorKey() { return IntegerMod; }
- int MinorKey() {
- // Encode the parameters in a unique 8 bit value.
- return ResultRegisterBits::encode(result_.code())
- | LhsRegisterBits::encode(lhs_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "IntegerModStub"; }
-
- // Utility functions.
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry);
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry);
- void ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs);
- void ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator);
- void ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits);
-
-
-#ifdef DEBUG
- void Print() { PrintF("IntegerModStub\n"); }
-#endif
-};
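A worked instance of this register contract (illustrative): for x % 48 with 48 == 3 * 2^4, the Smi tag adds one to the exponent, so shift_distance == 5, odd_number == 3, mask_bits keeps the Smi tag bit plus the low four value bits of x, and the lhs register holds x >> 4. The stub then only has to compute (x >> 4) % 3, because:

#include <cassert>

int main() {
  for (int x = 0; x < 100000; x++) {
    int h = x >> 4, l = x & 15;          // Split off the power-of-2 part.
    assert(x % 48 == 16 * (h % 3) + l);  // ModAnswer's shift-and-add form.
  }
}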
-
-
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
deleted file mode 100644
index 81ed2d0..0000000
--- a/src/arm/codegen-arm-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ARM_CODEGEN_ARM_INL_H_
-#define V8_ARM_CODEGEN_ARM_INL_H_
-
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 91c4747..bf748a9 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,56 +29,14 @@
#if defined(V8_TARGET_ARCH_ARM)
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-arm-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- // On ARM you either have a completely spilled frame or you
- // handle it yourself, but at the moment there's no automation
- // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
@@ -89,7348 +47,6 @@
}
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- previous_(owner->state()) {
- owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : CodeGenState(owner),
- true_target_(true_target),
- false_target_(false_target) {
- owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot,
- TypeInfo type_info)
- : CodeGenState(owner),
- slot_(slot) {
- owner->set_state(this);
- old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
- owner()->set_type_info(slot_, old_type_info_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(al),
- state_(NULL),
- loop_nesting_(0),
- type_info_(NULL),
- function_return_(JumpTarget::BIDIRECTIONAL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// r1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
-
- int slots = scope()->num_parameters() + scope()->num_stack_slots();
- ScopedVector<TypeInfo> type_info_array(slots);
- for (int i = 0; i < slots; i++) {
- type_info_array[i] = TypeInfo::Unknown();
- }
- type_info_ = &type_info_array;
-
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- cc_reg_ = al;
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- {
- CodeGenState state(this);
-
- // Entry:
- // Stack: receiver, arguments
- // lr: return address
- // fp: caller's frame pointer
- // sp: stack pointer
- // r1: called JS function
- // cp: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ stop("stop-at");
- }
-#endif
-
- frame_->Enter();
- // tos: code slot
-
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
-
- frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- frame_->AssertIsSpilled();
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope()->is_global_scope()); // No params in global scope.
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(r2, Operand(slot_offset), r3, r1);
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.SetExpectedHeight();
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
- }
- }
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- frame_->PrepareForReturn();
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- if (function_return_.is_bound()) {
- function_return_.Jump();
- } else {
- function_return_.Bind();
- GenerateReturnSequence();
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- function_return_.Bind();
- GenerateReturnSequence();
- }
-
- // Adjust for function-level loop nesting.
- ASSERT(loop_nesting_ == (info->is_in_loop() ? 1 : 0));
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(!has_cc());
- ASSERT(state_ == NULL);
- ASSERT(loop_nesting() == 0);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- ProcessDeferred();
- }
-
- allocator_ = NULL;
- type_info_ = NULL;
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
- if (slot == NULL) return kInvalidSlotNumber;
- switch (slot->type()) {
- case Slot::PARAMETER:
- return slot->index();
- case Slot::LOCAL:
- return slot->index() + scope()->num_parameters();
- default:
- break;
- }
- return kInvalidSlotNumber;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(cp)); // do not overwrite context register
- Register context = cp;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference
- // because the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return MemOperand(r0, 0);
- }
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- Register context = cp;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- }
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- }
- // Check that last extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, slot->index());
-}
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- ASSERT(!has_cc());
- int original_height = frame_->height();
-
- { ConditionCodeGenState new_state(this, true_target, false_target);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- has_valid_frame() &&
- !has_cc() &&
- frame_->height() == original_height) {
- true_target->Jump();
- }
- }
- if (force_cc && frame_ != NULL && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- ToBoolean(true_target, false_target);
- }
- ASSERT(!force_cc || !has_valid_frame() || has_cc());
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
- // We generally assume that we are not in a spilled scope for most
- // of the code generator. A failure to ensure this caused issue 815
- // and this assert is designed to catch similar issues.
- frame_->AssertIsNotSpilled();
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- JumpTarget true_target;
- JumpTarget false_target;
- LoadCondition(expr, &true_target, &false_target, false);
-
- if (has_cc()) {
- // Convert cc_reg_ into a boolean value.
- JumpTarget loaded;
- JumpTarget materialize_true;
- materialize_true.Branch(cc_reg_);
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- loaded.Jump();
- materialize_true.Bind();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- loaded.Bind();
- cc_reg_ = al;
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- // We have at least one condition value that has been "translated"
- // into a branch, thus it needs to be loaded explicitly.
- JumpTarget loaded;
- if (frame_ != NULL) {
- loaded.Jump(); // Don't lose the current TOS.
- }
- bool both = true_target.is_linked() && false_target.is_linked();
- // Load "true" if necessary.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- }
- // If both "true" and "false" need to be loaded jump across the code for
- // "false".
- if (both) {
- loaded.Jump();
- }
- // Load "false" if necessary.
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- }
- // A value is loaded on all paths reaching this point.
- loaded.Bind();
- }
- ASSERT(has_valid_frame());
- ASSERT(!has_cc());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadGlobal() {
- Register reg = frame_->GetTOSRegister();
- __ ldr(reg, GlobalObjectOperand());
- frame_->EmitPush(reg);
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- Register reg = frame_->GetTOSRegister();
- __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(reg,
- FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->EmitPush(reg);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
- // In strict mode there is no need for shadow arguments.
- ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
- } else {
- frame_->SpillAll();
- ArgumentsAccessStub stub(is_strict_mode()
- ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope()->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
- frame_->Adjust(3);
- __ Push(r2, r1, r0);
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
- scope()->is_strict_mode());
-
- JumpTarget done;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
- Register arguments = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(arguments, ip);
- done.Branch(ne);
- }
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- if (shadow != NULL) {
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- }
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- // We generally assume that we are not in a spilled scope for most
- // of the code generator. A failure to ensure this caused issue 815
- // and this assert is designed to catch similar issues.
- cgen->frame()->AssertIsNotSpilled();
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- int size = ref->size();
- ref->set_unloaded();
- if (size == 0) return;
-
- // Pop a reference from the stack while preserving TOS.
- VirtualFrame::RegisterAllocationScope scope(this);
- Comment cmnt(masm_, "[ UnloadReference");
- if (size > 0) {
- Register tos = frame_->PopToRegister();
- frame_->Drop(size);
- frame_->EmitPush(tos);
- }
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
- JumpTarget* false_target) {
- // Note: The generated code snippet does not change stack variables.
- // Only the condition code should be set.
- bool known_smi = frame_->KnownSmiAt(0);
- Register tos = frame_->PopToRegister();
-
- // Fast case checks
-
- // Check if the value is 'false'.
- if (!known_smi) {
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
-
- // Check if the value is 'true'.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target->Branch(eq);
-
- // Check if the value is 'undefined'.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
- }
-
- // Check if the value is a smi.
- __ cmp(tos, Operand(Smi::FromInt(0)));
-
- if (!known_smi) {
- false_target->Branch(eq);
- __ tst(tos, Operand(kSmiTagMask));
- true_target->Branch(eq);
-
- // Slow case.
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Implements the slow case by using ToBooleanStub.
- // The ToBooleanStub takes a single argument and returns a
- // non-zero value for true, or zero for false. Both the
- // argument value and the return value use the register
- // assigned to tos_.
- ToBooleanStub stub(tos);
- frame_->CallStub(&stub, 0);
- // Convert the result in "tos" to a condition code.
- __ cmp(tos, Operand(0, RelocInfo::NONE));
- } else {
- // Implements slow case by calling the runtime.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
- }
-
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int constant_rhs) {
- // Top of virtual frame: y
- // 2nd element on virtual frame: x
- // Result: top of virtual frame
-
- // Stub is entered with a call: 'return address' is in lr.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- if (inline_smi) {
- JumpTarget done;
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs);
- Register scratch = VirtualFrame::scratch0();
- __ orr(scratch, rhs, Operand(lhs));
- // Check they are both small and positive.
- __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
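- // eq here means both operands are non-negative smis with the top two
- // bits clear, so the optimistic add/sub below cannot overflow the smi
- // range.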
- ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- STATIC_ASSERT(kSmiTag == 0);
- if (op == Token::ADD) {
- __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
- } else {
- __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
- }
- done.Branch(eq);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- done.Bind();
- frame_->EmitPush(r0);
- break;
- } else {
- // Fall through!
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- if (inline_smi) {
- bool rhs_is_smi = frame_->KnownSmiAt(0);
- bool lhs_is_smi = frame_->KnownSmiAt(1);
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs);
- Register smi_test_reg;
- Condition cond;
- if (!rhs_is_smi || !lhs_is_smi) {
- if (rhs_is_smi) {
- smi_test_reg = lhs;
- } else if (lhs_is_smi) {
- smi_test_reg = rhs;
- } else {
- smi_test_reg = VirtualFrame::scratch0();
- __ orr(smi_test_reg, rhs, Operand(lhs));
- }
- // Check they are both Smis.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- cond = eq;
- } else {
- cond = al;
- }
- ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- if (op == Token::BIT_OR) {
- __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
- } else if (op == Token::BIT_AND) {
- __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
- } else {
- ASSERT(op == Token::BIT_XOR);
- STATIC_ASSERT(kSmiTag == 0);
- __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
- }
- if (cond != al) {
- JumpTarget done;
- done.Branch(cond);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- done.Bind();
- }
- frame_->EmitPush(r0);
- break;
- } else {
- // Fall through!
- }
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
- break;
- }
-
- case Token::COMMA: {
- Register scratch = frame_->PopToRegister();
- // Simply discard left value.
- frame_->Drop();
- frame_->EmitPush(scratch);
- break;
- }
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- int value,
- bool reversed,
- OverwriteMode overwrite_mode,
- Register tos)
- : op_(op),
- value_(value),
- reversed_(reversed),
- overwrite_mode_(overwrite_mode),
- tos_register_(tos) {
- set_comment("[ DeferredInlinedSmiOperation");
- }
-
- virtual void Generate();
- // This deferred code makes explicit calls to SaveRegisters(),
- // RestoreRegisters() and Exit(). Currently on ARM SaveRegisters() and
- // RestoreRegisters() are empty methods, so it is the responsibility of the
- // deferred code to save and restore registers.
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToNonSmiInput(Condition cond);
- void JumpToAnswerOutOfRange(Condition cond);
-
- private:
- void GenerateNonSmiInput();
- void GenerateAnswerOutOfRange();
- void WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch);
-
- Token::Value op_;
- int value_;
- bool reversed_;
- OverwriteMode overwrite_mode_;
- Register tos_register_;
- Label non_smi_input_;
- Label answer_out_of_range_;
-};
-
-
- // For bit operations we try harder and handle the case where the input is not
- // a Smi but a 32-bit integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
- ASSERT(Token::IsBitOp(op_));
-
- __ b(cond, &non_smi_input_);
-}
-
-
- // For bit operations the result is always 32 bits, so we handle the case where
- // the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
- ASSERT(Token::IsBitOp(op_));
-
- if ((op_ == Token::SHR) &&
- !Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- // >>> requires an unsigned to double conversion and the non VFP code
- // does not support this conversion.
- __ b(cond, entry_label());
- } else {
- __ b(cond, &answer_out_of_range_);
- }
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere. The tos_register_ is not used by the
-// virtual frame. On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register lhs = r1;
- Register rhs = r0;
- switch (op_) {
- case Token::ADD: {
- // Revert optimistic add.
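- // The inline code added the constant optimistically (see SmiOperation) and
- // branched here on overflow or on a non-smi operand, so subtract the
- // constant back to recover the original operand before setting up the
- // stub's argument registers.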
- if (reversed_) {
- __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
-
- case Token::SUB: {
- // Revert optimistic sub.
- if (reversed_) {
- __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
-
- // For these operations there is no optimistic operation that needs to be
- // reverted.
- case Token::MUL:
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- }
- if (reversed_ == tos_register_.is(r1)) {
- lhs = r0;
- rhs = r1;
- }
- break;
- }
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-
- GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
- __ CallStub(&stub);
-
- // The generic stub returns its value in r0, but that's not
- // necessarily what we want. We want whatever the inlined code
- // expected, which is that the answer is in the same register as
- // the operand was.
- __ Move(tos_register_, r0);
-
- // The tos register was not in use for the virtual frame that we
- // came into this function with, so we can merge back to that frame
- // without trashing it.
- copied_frame.MergeTo(frame_state()->frame());
-
- Exit();
-
- if (non_smi_input_.is_linked()) {
- GenerateNonSmiInput();
- }
-
- if (answer_out_of_range_.is_linked()) {
- GenerateAnswerOutOfRange();
- }
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch) {
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, answer);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
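- // vstr takes an untagged base address, so strip the heap object tag from
- // heap_number before storing the double into its value field.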
- __ sub(scratch, heap_number, Operand(kHeapObjectTag));
- __ vstr(d0, scratch, HeapNumber::kValueOffset);
- } else {
- WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
- __ CallStub(&stub);
- }
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
- // We know the left hand side is not a Smi and the right hand side is an
- // immediate value (value_) which can be represented as a Smi. We only
- // handle bit operations.
- ASSERT(Token::IsBitOp(op_));
-
- if (FLAG_debug_code) {
- __ Abort("Should not fall through!");
- }
-
- __ bind(&non_smi_input_);
- if (FLAG_debug_code) {
- __ AbortIfSmi(tos_register_);
- }
-
- // This routine uses the registers from r2 to r6. At the moment they are
- // not used by the register allocator, but when they are, this code should
- // use SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
- Register heap_number_map = r7;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
- __ cmp(r3, heap_number_map);
- // Not a number, fall back to the GenericBinaryOpStub.
- __ b(ne, entry_label());
-
- Register int32 = r2;
- // Not a signed 32-bit int, fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
-
- // tos_register_ (r0 or r1): Original heap number.
- // int32: signed 32-bit int.
-
- Label result_not_a_smi;
- int shift_value = value_ & 0x1f;
- switch (op_) {
- case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
- case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
- case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
- case Token::SAR:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, ASR, shift_value));
- }
- break;
- case Token::SHR:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
- } else {
- // SHR is special because it is required to produce a positive answer.
- __ cmp(int32, Operand(0, RelocInfo::NONE));
- }
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- // Non VFP code cannot convert from unsigned to double, so fall back
- // to GenericBinaryOpStub.
- __ b(mi, entry_label());
- }
- break;
- case Token::SHL:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, LSL, shift_value));
- }
- break;
- default: UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi. Not necessary for BIT_AND
- // with a non-negative constant, for SAR if the shift is more than 0, or for
- // SHR if the shift is more than 1.
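- // Adding 0x40000000 sets the sign flag exactly when the 32-bit value lies
- // outside the smi range [-2^30, 2^30 - 1], which is what the mi branch
- // below tests for.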
- if (!( (op_ == Token::BIT_AND && value_ >= 0) ||
- ((op_ == Token::SAR) && (shift_value > 0)) ||
- ((op_ == Token::SHR) && (shift_value > 1)))) {
- __ add(r3, int32, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- }
- __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
- Exit();
-
- if (result_not_a_smi.is_linked()) {
- __ bind(&result_not_a_smi);
- if (overwrite_mode_ != OVERWRITE_LEFT) {
- ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
- (overwrite_mode_ == OVERWRITE_RIGHT));
- // If the allocation fails, fall back to the GenericBinaryOpStub.
- __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
- // Nothing can go wrong now, so overwrite tos.
- __ mov(tos_register_, Operand(r4));
- }
-
- // int32: answer as signed 32bits integer.
- // tos_register_: Heap number to write the answer into.
- WriteNonSmiAnswer(int32, tos_register_, r3);
-
- Exit();
- }
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
- // The inputs to a bitwise operation were Smis but the result cannot fit
- // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
- // holds the untagged result to be converted. tos_register_ contains the
- // input. See the calls to JumpToAnswerOutOfRange to see how we got here.
- ASSERT(Token::IsBitOp(op_));
- ASSERT(!reversed_);
-
- Register untagged_result = VirtualFrame::scratch0();
-
- if (FLAG_debug_code) {
- __ Abort("Should not fall through!");
- }
-
- __ bind(&answer_out_of_range_);
- if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
- // >>> 0 is a special case where the untagged_result register is not set up
- // yet. We untag the input to get it.
- __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
- }
-
- // This routine uses the registers from r2 to r6. At the moment they are
- // not used by the register allocator, but when they are, this code should
- // use SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
- // Allocate the result heap number.
- Register heap_number_map = VirtualFrame::scratch1();
- Register heap_number = r4;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // If the allocation fails, fall back to the GenericBinaryOpStub.
- __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
- WriteNonSmiAnswer(untagged_result, heap_number, r3);
- __ mov(tos_register_, Operand(heap_number));
-
- Exit();
-}
-
-
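- // Returns whether x has at most two bits set: x &= x - 1 clears the lowest
- // set bit, and the result is then required to be zero or a power of two.
- // E.g. 0x50 -> 0x40 -> true, 0x70 -> 0x60 -> false.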
-static bool PopCountLessThanEqual2(unsigned int x) {
- x &= x - 1;
- return (x & (x - 1)) == 0;
-}
-
-
- // Returns the index of the lowest bit set. x must not be zero.
-static int BitPosition(unsigned x) {
- int bit_posn = 0;
- while ((x & 0xf) == 0) {
- bit_posn += 4;
- x >>= 4;
- }
- while ((x & 1) == 0) {
- bit_posn++;
- x >>= 1;
- }
- return bit_posn;
-}
-
-
- // Can we multiply by x with at most two shifts and an add?
- // This answers yes for all integers from 2 to 10.
-static bool IsEasyToMultiplyBy(int x) {
- if (x < 2) return false; // Avoid special cases.
- if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
- if (IsPowerOf2(x)) return true; // Simple shift.
- if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
- if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
- return false;
-}
-
-
- // Can multiply by anything that IsEasyToMultiplyBy returns true for.
- // Source and destination may be the same register. This routine does
- // not set the carry and overflow flags the way a mul instruction would.
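- // For example, known_int == 10 (0b1010) has two bits set, so the code emits
- // add(dest, src, Operand(src, LSL, 2)) giving src * 5, then shifts left by
- // one for src * 10; known_int == 7 (0b111) takes the power-of-2-minus-one
- // path: rsb(dest, src, Operand(src, LSL, 3)) == src * 8 - src.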
-static void InlineMultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int) {
- if (IsPowerOf2(known_int)) {
- masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
- } else if (PopCountLessThanEqual2(known_int)) {
- int first_bit = BitPosition(known_int);
- int second_bit = BitPosition(known_int ^ (1 << first_bit));
- masm->add(destination, source,
- Operand(source, LSL, second_bit - first_bit));
- if (first_bit != 0) {
- masm->mov(destination, Operand(destination, LSL, first_bit));
- }
- } else {
- ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
- int the_bit = BitPosition(known_int + 1);
- masm->rsb(destination, source, Operand(source, LSL, the_bit));
- }
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
- int int_value = Smi::cast(*value)->value();
-
- bool both_sides_are_smi = frame_->KnownSmiAt(0);
-
- bool something_to_inline;
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::BIT_AND:
- case Token::BIT_OR:
- case Token::BIT_XOR: {
- something_to_inline = true;
- break;
- }
- case Token::SHL: {
- something_to_inline = (both_sides_are_smi || !reversed);
- break;
- }
- case Token::SHR:
- case Token::SAR: {
- if (reversed) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- case Token::MOD: {
- if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- case Token::MUL: {
- if (!IsEasyToMultiplyBy(int_value)) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- default: {
- something_to_inline = false;
- break;
- }
- }
-
- if (!something_to_inline) {
- if (!reversed) {
- // Push the rhs onto the virtual frame by putting it in a TOS register.
- Register rhs = frame_->GetTOSRegister();
- __ mov(rhs, Operand(value));
- frame_->EmitPush(rhs, TypeInfo::Smi());
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
- } else {
- // Pop the rhs, then push lhs and rhs in the right order. This performs
- // at most one pop; the rest takes place in TOS registers.
- Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
- Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
- __ mov(lhs, Operand(value));
- frame_->EmitPush(lhs, TypeInfo::Smi());
- TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
- frame_->EmitPush(rhs, t);
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
- GenericBinaryOpStub::kUnknownIntValue);
- }
- return;
- }
-
- // We move the top of the stack to a register (normally no move is involved).
- Register tos = frame_->PopToRegister();
- switch (op) {
- case Token::ADD: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
- __ add(tos, tos, Operand(value), SetCC);
- deferred->Branch(vs);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
- if (reversed) {
- __ rsb(tos, tos, Operand(value), SetCC);
- } else {
- __ sub(tos, tos, Operand(value), SetCC);
- }
- deferred->Branch(vs);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- if (both_sides_are_smi) {
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- frame_->EmitPush(tos, TypeInfo::Smi());
- } else {
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->JumpToNonSmiInput(ne);
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- deferred->BindExit();
- TypeInfo result_type = TypeInfo::Integer32();
- if (op == Token::BIT_AND && int_value >= 0) {
- result_type = TypeInfo::Smi();
- }
- frame_->EmitPush(tos, result_type);
- }
- break;
- }
-
- case Token::SHL:
- if (reversed) {
- ASSERT(both_sides_are_smi);
- int max_shift = 0;
- int max_result = int_value == 0 ? 1 : int_value;
- while (Smi::IsValid(max_result << 1)) {
- max_shift++;
- max_result <<= 1;
- }
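- // On exit int_value << max_shift still fits in a smi while one more shift
- // would not; shift amounts of max_shift and above go to the deferred code.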
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
- // Mask off the last 5 bits of the shift operand (rhs). This is part
- // of the definition of shift in JS and we know we have a Smi so we
- // can safely do this. The masked version gets passed to the
- // deferred code, but that makes no difference.
- __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
- __ cmp(tos, Operand(Smi::FromInt(max_shift)));
- deferred->Branch(ge);
- Register scratch = VirtualFrame::scratch0();
- __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
- __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
- __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
- deferred->BindExit();
- TypeInfo result = TypeInfo::Integer32();
- frame_->EmitPush(tos, result);
- break;
- }
- // Fall through!
- case Token::SHR:
- case Token::SAR: {
- ASSERT(!reversed);
- int shift_value = int_value & 0x1f;
- TypeInfo result = TypeInfo::Number();
-
- if (op == Token::SHR) {
- if (shift_value > 1) {
- result = TypeInfo::Smi();
- } else if (shift_value > 0) {
- result = TypeInfo::Integer32();
- }
- } else if (op == Token::SAR) {
- if (shift_value > 0) {
- result = TypeInfo::Smi();
- } else {
- result = TypeInfo::Integer32();
- }
- } else {
- ASSERT(op == Token::SHL);
- result = TypeInfo::Integer32();
- }
-
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->JumpToNonSmiInput(ne);
- }
- switch (op) {
- case Token::SHL: {
- if (shift_value != 0) {
- Register untagged_result = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- int adjusted_shift = shift_value - kSmiTagSize;
- ASSERT(adjusted_shift >= 0);
-
- if (adjusted_shift != 0) {
- __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
- } else {
- __ mov(untagged_result, Operand(tos));
- }
- // Check that the *signed* result fits in a smi.
- __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
- deferred->JumpToAnswerOutOfRange(mi);
- __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
- }
- break;
- }
- case Token::SHR: {
- if (shift_value != 0) {
- Register untagged_result = VirtualFrame::scratch0();
- // Remove tag.
- __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
- __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
- if (shift_value == 1) {
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when Smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi.
- __ tst(untagged_result, Operand(0xc0000000));
- deferred->JumpToAnswerOutOfRange(ne);
- }
- __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
- } else {
- __ cmp(tos, Operand(0, RelocInfo::NONE));
- deferred->JumpToAnswerOutOfRange(mi);
- }
- break;
- }
- case Token::SAR: {
- if (shift_value != 0) {
- // Do the shift and the tag removal in one operation. If the shift
- // is 31 bits (the highest possible value) then we emit the
- // instruction as a shift by 0 which in the ARM ISA means shift
- // arithmetically by 32.
- __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
- __ mov(tos, Operand(tos, LSL, kSmiTagSize));
- }
- break;
- }
- default: UNREACHABLE();
- }
- deferred->BindExit();
- frame_->EmitPush(tos, result);
- break;
- }
-
- case Token::MOD: {
- ASSERT(!reversed);
- ASSERT(int_value >= 2);
- ASSERT(IsPowerOf2(int_value));
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- unsigned mask = (0x80000000u | kSmiTagMask);
- __ tst(tos, Operand(mask));
- deferred->Branch(ne); // Go to deferred code on non-Smis and negative numbers.
- mask = (int_value << kSmiTagSize) - 1;
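- // For a non-negative smi, tos == x << kSmiTagSize, so masking with
- // (int_value << kSmiTagSize) - 1 leaves (x % int_value) << kSmiTagSize,
- // which is already a correctly tagged smi.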
- __ and_(tos, tos, Operand(mask));
- deferred->BindExit();
- // The modulus of a positive power-of-2 smi is a smi if the lhs is an integer.
- frame_->EmitPush(
- tos,
- both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
- break;
- }
-
- case Token::MUL: {
- ASSERT(IsEasyToMultiplyBy(int_value));
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
- max_smi_that_wont_overflow <<= kSmiTagSize;
- unsigned mask = 0x80000000u;
- while ((mask & max_smi_that_wont_overflow) == 0) {
- mask |= mask >> 1;
- }
- mask |= kSmiTagMask;
- // This does a single mask that checks for a too high value in a
- // conservative way and for a non-Smi. It also filters out negative
- // numbers, unfortunately, but since this code is inline we prefer
- // brevity to comprehensiveness.
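- // E.g. for int_value == 3 the loop yields mask == 0xe0000001: any value
- // with the tag bit or one of the top three bits set is routed to the
- // deferred code.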
- __ tst(tos, Operand(mask));
- deferred->Branch(ne);
- InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void CodeGenerator::Comparison(Condition cond,
- Expression* left,
- Expression* right,
- bool strict) {
- VirtualFrame::RegisterAllocationScope scope(this);
-
- if (left != NULL) Load(left);
- if (right != NULL) Load(right);
-
- // sp[0] : y
- // sp[1] : x
- // result : cc register
-
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cond == eq);
-
- Register lhs;
- Register rhs;
-
- bool lhs_is_smi;
- bool rhs_is_smi;
-
- // We load the top two stack positions into registers chosen by the virtual
- // frame. This should keep the register shuffling to a minimum.
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cond == gt || cond == le) {
- cond = ReverseCondition(cond);
- lhs_is_smi = frame_->KnownSmiAt(0);
- rhs_is_smi = frame_->KnownSmiAt(1);
- lhs = frame_->PopToRegister();
- rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
- } else {
- rhs_is_smi = frame_->KnownSmiAt(0);
- lhs_is_smi = frame_->KnownSmiAt(1);
- rhs = frame_->PopToRegister();
- lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
- }
-
- bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
-
- ASSERT(rhs.is(r0) || rhs.is(r1));
- ASSERT(lhs.is(r0) || lhs.is(r1));
-
- JumpTarget exit;
-
- if (!both_sides_are_smi) {
- // Now we have the two sides in r0 and r1. We flush any other registers
- // because the stub doesn't know about register allocation.
- frame_->SpillAll();
- Register scratch = VirtualFrame::scratch0();
- Register smi_test_reg;
- if (lhs_is_smi) {
- smi_test_reg = rhs;
- } else if (rhs_is_smi) {
- smi_test_reg = lhs;
- } else {
- __ orr(scratch, lhs, Operand(rhs));
- smi_test_reg = scratch;
- }
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- JumpTarget smi;
- smi.Branch(eq);
-
- // Perform non-smi comparison by stub.
- // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
- // We call with 0 args because there are 0 on the stack.
- CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
- frame_->CallStub(&stub, 0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- exit.Jump();
-
- smi.Bind();
- }
-
- // Do smi comparisons by pointer comparison.
- __ cmp(lhs, Operand(rhs));
-
- exit.Bind();
- cc_reg_ = cond;
-}
-
-
-// Call the function on the stack with the given arguments.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore context and pop function from the stack.
- __ ldr(cp, frame_->Context());
- frame_->Drop(); // discard the TOS
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
-
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
- frame_->Dup();
- frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
- frame_->EmitPush(r0);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // At this point the top two stack elements are probably in registers
- // since they were just loaded. Ensure they are in registers and get
- // hold of those registers.
- Register receiver_reg = frame_->Peek2();
- Register arguments_reg = frame_->Peek();
-
- // From now on the frame is spilled.
- frame_->SpillAll();
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of the stack at this point:
- // sp[0]: arguments object of the current function or the hole.
- // sp[1]: receiver
- // sp[2]: applicand.apply
- // sp[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- JumpTarget slow;
- Label done;
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(ip, arguments_reg);
- slow.Branch(ne);
-
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop();
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // sp[0]: receiver - in the receiver_reg register.
- // sp[1]: applicand.apply
- // sp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ JumpIfSmi(receiver_reg, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ ldr(r0, MemOperand(sp, kPointerSize));
- __ JumpIfSmi(r0, &build_args);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- __ b(ne, &build_args);
- Handle<Code> apply_code(
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply));
- __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
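- // r1 now holds the code entry address; subtracting Code::kHeaderSize minus
- // the heap object tag turns it back into a tagged Code pointer that can be
- // compared against the Function.prototype.apply builtin.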
- __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ cmp(r1, Operand(apply_code));
- __ b(ne, &build_args);
-
- // Check that applicand is a function.
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ JumpIfSmi(r1, &build_args);
- __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
- __ b(ne, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(r0, Operand(scope()->num_parameters()));
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ ldr(r2, frame_->ParameterAt(i));
- __ push(r2);
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(r0, Operand(r0, LSR, kSmiTagSize));
- __ mov(r3, r0);
- __ cmp(r0, Operand(kArgumentsLimit));
- __ b(gt, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // r3 is a small non-negative integer, due to the test above.
- __ cmp(r3, Operand(0, RelocInfo::NONE));
- __ b(eq, &invoke);
- // Compute the address of the first argument.
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(kPointerSize));
- __ bind(&loop);
- // Post-decrement argument address by kPointerSize on each iteration.
- __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
- __ push(r4);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(gt, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ push(r0);
- // Stack now has 1 element:
- // sp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we jumped here from a point where:
- // sp[0]: receiver
- // sp[1]: applicand.apply
- // sp[2]: applicand.
- StoreArgumentsObject(false);
-
- // Stack and frame now have 4 elements.
- slow.Bind();
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(); // Drop the receiver as well.
- frame_->EmitPush(r0);
- frame_->SpillAll(); // The other path to the 'done' label below has a spilled frame.
- // Stack now has 1 element:
- // sp[0]: result
- __ bind(&done);
-
- // Restore the context register after a call.
- __ ldr(cp, frame_->Context());
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- ASSERT(has_cc());
- Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- target->Branch(cond);
- cc_reg_ = al;
-}
-
-
-void CodeGenerator::CheckStack() {
- frame_->SpillAll();
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(ip,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- masm_->Call(ip, lo);
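- // Both the mov and the call above execute only if the lo condition holds,
- // i.e. only when sp is below the stack limit.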
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(pairs));
- frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(var->name()));
- // Declaration nodes are always declared in only two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Operand(0, RelocInfo::NONE));
- }
-
- frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
-
- ASSERT(frame_->height() == original_height);
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(FACTORY->the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- WriteBarrierCharacter wb_info =
- val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
- if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
- // Set initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT, wb_info);
-
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- frame_->Drop();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
-
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- Comment cmnt(masm_, "[ IfThenElse");
- JumpTarget then;
- JumpTarget else_;
- // if (cond)
- LoadCondition(node->condition(), &then, &else_, true);
- if (frame_ != NULL) {
- Branch(false, &else_);
- }
- // then
- if (frame_ != NULL || then.is_linked()) {
- then.Bind();
- Visit(node->then_statement());
- }
- if (frame_ != NULL) {
- exit.Jump();
- }
- // else
- if (else_.is_linked()) {
- else_.Bind();
- Visit(node->else_statement());
- }
-
- } else if (has_then_stm) {
- Comment cmnt(masm_, "[ IfThen");
- ASSERT(!has_else_stm);
- JumpTarget then;
- // if (cond)
- LoadCondition(node->condition(), &then, &exit, true);
- if (frame_ != NULL) {
- Branch(false, &exit);
- }
- // then
- if (frame_ != NULL || then.is_linked()) {
- then.Bind();
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- Comment cmnt(masm_, "[ IfElse");
- ASSERT(!has_then_stm);
- JumpTarget else_;
- // if (!cond)
- LoadCondition(node->condition(), &exit, &else_, true);
- if (frame_ != NULL) {
- Branch(true, &exit);
- }
- // else
- if (frame_ != NULL || else_.is_linked()) {
- else_.Bind();
- Visit(node->else_statement());
- }
-
- } else {
- Comment cmnt(masm_, "[ If");
- ASSERT(!has_then_stm && !has_else_stm);
- // if (cond)
- LoadCondition(node->condition(), &exit, &exit, false);
- if (frame_ != NULL) {
- if (has_cc()) {
- cc_reg_ = al;
- } else {
- frame_->Drop();
- }
- }
- }
-
- // end
- if (exit.is_linked()) {
- exit.Bind();
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- frame_->PopToR0();
- frame_->PrepareForReturn();
- if (function_return_is_shadowed_) {
- function_return_.Jump();
- } else {
- // Pop the result from the frame and prepare the frame for
- // returning, thus making it easier to merge.
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump();
- } else {
- function_return_.Bind();
- GenerateReturnSequence();
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns the parameter as it is.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Tear down the frame which will restore the caller's frame pointer and
- // the link register.
- frame_->Exit();
-
- // Here we use masm_-> instead of the __ macro to prevent the code
- // coverage tool from instrumenting this code, as we rely on its size here.
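- // num_parameters + 1 slots are dropped: the parameters plus the receiver.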
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
- DeleteFrame();
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- if (node->is_catch_block()) {
- frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("PushContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
- // Update context local.
- __ str(cp, frame_->Context());
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
-
- Load(node->tag());
-
- JumpTarget next_test;
- JumpTarget fall_through;
- JumpTarget default_entry;
- JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
- if (clause->is_default()) {
- // Remember the default clause and compile it at the end.
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case clause");
- // Compile the test.
- next_test.Bind();
- next_test.Unuse();
- // Duplicate TOS.
- frame_->Dup();
- Comparison(eq, NULL, clause->label(), true);
- Branch(false, &next_test);
-
- // Before entering the body from the test, remove the switch value from
- // the stack.
- frame_->Drop();
-
- // Label the body so that fall through is enabled.
- if (i > 0 && cases->at(i - 1)->is_default()) {
- default_exit.Bind();
- } else {
- fall_through.Bind();
- fall_through.Unuse();
- }
- VisitStatements(clause->statements());
-
- // If control flow can fall through from the body, jump to the next body
- // or the end of the statement.
- if (frame_ != NULL) {
- if (i < length - 1 && cases->at(i + 1)->is_default()) {
- default_entry.Jump();
- } else {
- fall_through.Jump();
- }
- }
- }
-
- // The final "test" removes the switch value.
- next_test.Bind();
- frame_->Drop();
-
- // If there is a default clause, compile it.
- if (default_clause != NULL) {
- Comment cmnt(masm_, "[ Default clause");
- default_entry.Bind();
- VisitStatements(default_clause->statements());
- // If control flow can fall out of the default and there is a case after
- // it, jump to that case's body.
- if (frame_ != NULL && default_exit.is_bound()) {
- default_exit.Jump();
- }
- }
-
- if (fall_through.is_linked()) {
- fall_through.Bind();
- }
-
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- // Label the top of the loop for the backward CFG edge. If the test
- // is always true we can use the continue target, and if the test is
- // always false there is no need.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- switch (info) {
- case ALWAYS_TRUE:
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- node->continue_target()->SetExpectedHeight();
- break;
- case DONT_KNOW:
- node->continue_target()->SetExpectedHeight();
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control can fall off the end of the body, jump back to the
- // top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case ALWAYS_FALSE:
- // If we have a continue in the body, we only have to bind its
- // jump target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- // An invalid frame here indicates that control did not
- // fall out of the test expression.
- Branch(true, &body);
- }
- }
- break;
- }
-
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the test is never true and has no side effects there is no need
- // to compile the test or body.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- node->break_target()->SetExpectedHeight();
- IncrementLoopNesting();
-
- // Label the top of the loop with the continue target for the backward
- // CFG edge.
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
-
- if (info == DONT_KNOW) {
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- // A NULL frame indicates that control did not fall out of the
- // test expression.
- Branch(false, node->break_target());
- }
- if (has_valid_frame() || body.is_linked()) {
- body.Bind();
- }
- }
-
- if (has_valid_frame()) {
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // If control flow can fall out of the body, jump back to the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the test is never true there is no need to compile the test or
- // body.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- node->break_target()->SetExpectedHeight();
- IncrementLoopNesting();
-
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- TypeInfoCodeGenState type_info_scope(this,
- node->is_fast_smi_loop() ?
- node->loop_variable()->AsSlot() :
- NULL,
- TypeInfo::Smi());
-
- // If there is no update statement, label the top of the loop with the
- // continue target, otherwise with the loop target.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- if (node->next() == NULL) {
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
- } else {
- node->continue_target()->SetExpectedHeight();
- loop.Bind();
- }
-
- // If the test is always true, there is no need to compile it.
- if (info == DONT_KNOW) {
- JumpTarget body;
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- Branch(false, node->break_target());
- }
- if (has_valid_frame() || body.is_linked()) {
- body.Bind();
- }
- }
-
- if (has_valid_frame()) {
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- if (node->next() == NULL) {
- // If there is no update statement and control flow can fall out
- // of the loop, jump directly to the continue label.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- } else {
- // If there is an update statement and control flow can reach it
- // via falling out of the body of the loop or continuing, we
- // compile the update statement.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // Record source position of the statement as this code which is
- // after the code for the body actually belongs to the loop
- // statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- loop.Jump();
- }
- }
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- Load(node->enumerable());
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Both SpiderMonkey and kjs ignore null and undefined in contrast
- // to the specification; section 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(r0);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- exit.Branch(eq);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- exit.Branch(eq);
-
- // Stack layout in body:
- // [iteration counter (Smi)]
- // [length of array]
- // [FixedArray]
- // [Map or 0]
- // [Object]
-
- // Check if enumerable is already a JSObject
- __ tst(r0, Operand(kSmiTagMask));
- primitive.Branch(eq);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- jsobject.Branch(hs);
-
- primitive.Bind();
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // r0: value to be iterated over
- frame_->EmitPush(r0); // Push the object being iterated over.
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ mov(r1, Operand(r0));
- loop.Bind();
- // Check that there are no elements.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ cmp(r2, r4);
- call_runtime.Branch(ne);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r3 for the subsequent
- // prototype load.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
- __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
- __ cmp(r2, ip);
- call_runtime.Branch(eq);
- // Check that there is an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
- __ tst(r2, Operand(kSmiTagMask));
- call_runtime.Branch(eq);
- // For all objects but the receiver, check that the cache is empty.
- // r4: empty fixed array root.
- __ cmp(r1, r0);
- check_prototype.Branch(eq);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(r2, r4);
- call_runtime.Branch(ne);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r1, ip);
- loop.Branch(ne);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- // r0: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(r2, Operand(r0));
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- fixed_array.Branch(ne);
-
- use_cache.Bind();
- // Get enum cache
- // r0: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ mov(r1, Operand(r0));
- __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2,
- FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(r0); // map
- frame_->EmitPush(r2); // enum cache bridge cache
- __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
- entry.Jump();
-
- fixed_array.Bind();
- __ mov(r1, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r1); // insert 0 in place of Map
- frame_->EmitPush(r0);
-
- // Push the length of the array and the initial index onto the stack.
- __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(0))); // init index
- frame_->EmitPush(r0);
-
- // Condition.
- entry.Bind();
- // sp[0] : index
- // sp[1] : array/enum cache length
- // sp[2] : array or enum cache
- // sp[3] : 0 or map
- // sp[4] : enumerable
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->SetExpectedHeight();
- node->continue_target()->SetExpectedHeight();
-
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, frame_->ElementAt(0));
- __ cmp(r0, r1); // compare to the array length
- node->break_target()->Branch(hs);
-
- // Get the i'th entry of the array.
- __ ldr(r2, frame_->ElementAt(2));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Get Map or 0.
- __ ldr(r2, frame_->ElementAt(3));
- // Check if this (still) matches the map of the enumerable.
- // If not, we have to filter the key.
- __ ldr(r1, frame_->ElementAt(4));
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r1, Operand(r2));
- end_del_check.Branch(eq);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- __ ldr(r0, frame_->ElementAt(4)); // load the enumerable
- frame_->EmitPush(r0);
- frame_->EmitPush(r3); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
- __ mov(r3, Operand(r0), SetCC);
- // If the property has been removed while iterating, we just skip it.
- node->continue_target()->Branch(eq);
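-
- // JavaScript example of such a removal:
- //
- //   var o = { a: 1, b: 2 };
- //   for (var p in o) { delete o.b; }  // 'b' is skipped once deleted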
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. r3: i'th entry of the enum cache (or string thereof).
- frame_->EmitPush(r3); // push entry
- { VirtualFrame::RegisterAllocationScope scope(this);
- Reference each(this, node->each());
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll(); // Sync stack to memory.
- // Get the value (under the reference on the stack) from memory.
- __ ldr(r0, frame_->ElementAt(each.size()));
- frame_->EmitPush(r0);
- each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(2); // The result of the set and the extra pushed value.
- } else {
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (e.g., the entry pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(1); // Drop the result of the set operation.
- }
- }
- }
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- { VirtualFrame::RegisterAllocationScope scope(this);
- Visit(node->body());
- }
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- frame_->EmitPush(r0);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(r0);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->catch_block()->statements());
- }
- if (frame_ != NULL) {
- exit.Jump();
- }
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the labels for all escapes from the try block, including
- // returns. During shadowing, the original label is hidden as the
- // LabelShadow and operations on the original actually affect the
- // shadowing label.
- //
- // We should probably try to unify the escaping labels and the return
- // label.
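- //
- // JavaScript example of such an escape (the return jumps out of the
- // try block and must first unlink the handler pushed above):
- //
- //   function f() {
- //     try { return 1; } catch (e) { return 2; }
- //   }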
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->try_block()->statements());
- }
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1); // r0 can contain the return value.
- __ mov(r3, Operand(handler_address));
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing labels that have been
- // jumped to. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from the try chain.
- shadows[i]->Bind();
- // Because we can be jumping here (to spilled code) from unspilled
- // code, we need to reestablish a spilled frame at this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (e.g., for...in) may have left stuff on the stack.
- __ mov(r3, Operand(handler_address));
- __ ldr(sp, MemOperand(r3));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1); // r0 can contain the return value.
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- frame_->PrepareForReturn();
- }
- shadows[i]->other_target()->Jump();
- }
- }
-
- exit.Bind();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of the reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
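-
- // JavaScript examples of how the finally block can be entered:
- //
- //   try { } finally { }                    // FALLING off the try block
- //   try { throw 0; } finally { }           // THROWING an exception
- //   while (true) { try { break; } finally { } }  // JUMPING out of the block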
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(r0); // save exception object on the stack
- // In case of thrown exceptions, this is where we continue.
- __ mov(r2, Operand(Smi::FromInt(THROWING)));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the labels for all escapes from the try block, including
- // returns. Shadowing hides the original label as the LabelShadow and
- // operations on the original actually affect the shadowing label.
- //
- // We should probably try to unify the escaping labels and the return
- // label.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->try_block()->statements());
- }
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
- __ mov(r3, Operand(handler_address));
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in r2, then jump around the unlink blocks if any.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
- __ mov(r2, Operand(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // in (a non-refcounted reference to) r0. We must preserve it
- // until it is pushed.
- //
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- shadows[i]->Bind();
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (e.g., for...in) may have left stuff on the
- // stack.
- __ mov(r3, Operand(handler_address));
- __ ldr(sp, MemOperand(r3));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame. The next
- // handler address is currently on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this label shadowed the function return, materialize the
- // return value on the stack.
- frame_->EmitPush(r0);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
- }
- __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(r2);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->finally_block()->statements());
- }
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(r2);
- frame_->EmitPop(r0);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- JumpTarget* original = shadows[i]->other_target();
- __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
- if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- JumpTarget skip;
- skip.Branch(ne);
- frame_->PrepareForReturn();
- original->Jump();
- skip.Bind();
- } else {
- original->Branch(eq);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ cmp(r2, Operand(Smi::FromInt(THROWING)));
- exit.Branch(ne);
-
- // Rethrow exception.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ DebuggerStatament");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->DebugBreak();
-#endif
- // Ignore the return value.
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (!pretenure &&
- scope()->is_function_scope() &&
- function_info->num_literals() == 0) {
- FastNewClosureStub stub(
- function_info->strict_mode() ? kStrictMode : kNonStrictMode);
- frame_->EmitPush(Operand(function_info));
- frame_->SpillAll();
- frame_->CallStub(&stub, 1);
- frame_->EmitPush(r0);
- } else {
- // Create a new closure.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(function_info));
- frame_->EmitPush(Operand(pretenure
- ? FACTORY->true_value()
- : FACTORY->false_value()));
- frame_->CallRuntime(Runtime::kNewClosure, 3);
- frame_->EmitPush(r0);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- if (function_info.is_null()) {
- SetStackOverflow();
- ASSERT(frame_->height() == original_height);
- return;
- }
- InstantiateFunction(function_info, node->pretenure());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info(), false);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- LoadCondition(node->condition(), &then, &else_, true);
- if (has_valid_frame()) {
- Branch(false, &else_);
- }
- if (has_valid_frame() || then.is_linked()) {
- then.Bind();
- Load(node->then_expression());
- }
- if (else_.is_linked()) {
- JumpTarget exit;
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- if (exit.is_linked()) exit.Bind();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // JumpTargets do not yet support merging frames so the frame must be
- // spilled when jumping to these targets.
- JumpTarget slow;
- JumpTarget done;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &slow,
- &done);
-
- slow.Bind();
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(slot->var()->name()));
-
- if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind();
- frame_->EmitPush(r0);
-
- } else {
- Register scratch = VirtualFrame::scratch0();
- TypeInfo info = type_info(slot);
- frame_->EmitPush(SlotOperand(slot, scratch), info);
-
- if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet), which needs to be converted into the 'undefined'
- // value.
- Comment cmnt(masm_, "[ Unhole const");
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(tos, ip);
- __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
- frame_->EmitPush(tos);
- }
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- VirtualFrame::RegisterAllocationScope scope(this);
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Load the loaded value from the stack into a register but leave it on the
- // stack.
- Register tos = frame_->Peek();
-
- // If the loaded value is the sentinel that indicates that we
- // haven't loaded the arguments object yet, we need to do it now.
- JumpTarget exit;
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(tos, ip);
- exit.Branch(ne);
- frame_->Drop();
- StoreArgumentsObject(false);
- exit.Bind();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- ASSERT(slot != NULL);
- VirtualFrame::RegisterAllocationScope scope(this);
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(slot->var()->name()));
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores the attributes
- // (e.g. READ_ONLY) of the context slot so that we can initialize
- // const properties (introduced via eval("const foo = (some
- // expr);")). Also, uses the current function context instead of
- // the top context.
- //
- // Note that we must declare foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the
- // same time, because the const declaration may be at the end of
- // the eval code (sigh...) and the const variable may have been
- // used before (where its value is 'undefined'). Thus, we can only
- // do the initialization when we actually encounter the expression
- // and when the expression operands are defined and valid, and
- // thus we need the split into 2 operations: declaration of the
- // context slot followed by initialization.
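- //
- // JavaScript example ('print' stands for any function in scope):
- //
- //   eval("print(foo); const foo = 42;");  // prints 'undefined'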
- frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- frame_->EmitPush(r0);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // The frame must be spilled when branching to this target.
- JumpTarget exit;
-
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is
- // executed, the code is identical to a normal store (see below).
- Comment cmnt(masm_, "[ Init const");
- __ ldr(scratch, SlotOperand(slot, scratch));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- exit.Branch(ne);
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. r2 may be loaded with context; used below in
- // RecordWrite.
- Register tos = frame_->Peek();
- __ str(tos, SlotOperand(slot, scratch));
- if (slot->type() == Slot::CONTEXT) {
- // Skip write barrier if the written value is a smi.
- __ tst(tos, Operand(kSmiTagMask));
- // We don't use tos any more after here.
- exit.Branch(eq);
- // scratch is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- // We need an extra register. Until we have a way to do that in the
- // virtual frame we will cheat and ask for a free TOS register.
- Register scratch3 = frame_->GetTOSRegister();
- __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
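- // For example, in
- //
- //   function f() { eval("var x = 1"); return x; }
- //
- // the eval-introduced 'x' lives in a context extension object, so any
- // scope between here and the global scope that calls eval forces the
- // slow path.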
- Register tmp = frame_->scratch0();
- Register tmp2 = frame_->scratch1();
- Register context = cp;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- frame_->SpillAll();
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- }
- // Load next context in chain.
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- frame_->SpillAll();
- Label next, fast;
- __ Move(tmp, context);
- __ bind(&next);
- // Terminate at global context.
- __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- __ cmp(tmp2, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- // Load next context in chain.
- __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- __ b(&next);
- __ bind(&fast);
- }
-
- // Load the global object.
- LoadGlobal();
- // Set up the name register and call the load IC.
- frame_->CallLoadIC(slot->var()->name(),
- typeof_state == INSIDE_TYPEOF
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT);
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
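- // For example, in
- //
- //   function f(a) { eval("0"); return a; }
- //
- // 'a' could in principle be shadowed by an eval-introduced binding,
- // but almost never is, so a fast load of the parameter is attempted
- // before falling back to the runtime.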
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- frame_->SpillAll();
- done->Jump();
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- frame_->SpillAll();
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(potential_slot,
- r1,
- r2,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- done->Jump();
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- r1,
- r2,
- slow));
- frame_->EmitPush(r0);
- __ mov(r1, Operand(key_literal->handle()));
- frame_->EmitPush(r1);
- EmitKeyedLoad();
- done->Jump();
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ VariableProxy");
-
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Literal");
- Register reg = frame_->GetTOSRegister();
- bool is_smi = node->handle()->IsSmi();
- __ mov(reg, Operand(node->handle()));
- frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ RexExp Literal");
-
- Register tmp = VirtualFrame::scratch0();
- // Free up a TOS register that can be used to push the literal.
- Register literal = frame_->GetTOSRegister();
-
- // Retrieve the literal array and check the allocated entry.
-
- // Load the function of this activation.
- __ ldr(tmp, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
-
- // Load the literal at the index saved in the AST.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(literal, FieldMemOperand(tmp, literal_offset));
-
- JumpTarget materialized;
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(literal, ip);
- // This branch locks the virtual frame at the materialized label to match
- // the one we have here, where the literal register is not on the stack and
- // nothing is spilled.
- materialized.Branch(ne);
-
- // If the entry is undefined we call the runtime system to compute
- // the literal.
- // literal array (0)
- frame_->EmitPush(tmp);
- // literal index (1)
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- // RegExp pattern (2)
- frame_->EmitPush(Operand(node->pattern()));
- // RegExp flags (3)
- frame_->EmitPush(Operand(node->flags()));
- frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Move(literal, r0);
-
- materialized.Bind();
-
- frame_->EmitPush(literal);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- frame_->EmitPush(Operand(Smi::FromInt(size)));
- frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
- // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
- // r0 is newly allocated space.
-
- // Reuse literal variable with (possibly) a new register, still holding
- // the materialized boilerplate.
- literal = frame_->PopToRegister(r0);
-
- __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
-
- // Push the clone.
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- Register literal = frame_->GetTOSRegister();
- // Load the function of this activation.
- __ ldr(literal, frame_->Function());
- // Literal array.
- __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
- frame_->EmitPush(literal);
- // Literal index.
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- // Constant properties.
- frame_->EmitPush(Operand(node->constant_properties()));
- // Should the object literal have fast elements?
- frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
- if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->EmitPush(r0); // save the result
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
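- // For example, in '{ x: f(), x: g() }' both f() and g() are
- // evaluated, but only the store for the second 'x' is emitted.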
-
- for (int i = 0; i < node->properties()->length(); i++) {
- // At the start of each iteration, the top of stack contains
- // the newly created object literal.
- ObjectLiteral::Property* property = node->properties()->at(i);
- Literal* key = property->key();
- Expression* value = property->value();
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize));
- Load(value);
- if (property->emit_store()) {
- frame_->PopToR0();
- // Fetch the object literal.
- frame_->SpillAllButCopyTOSToR1();
- __ mov(r2, Operand(key->handle()));
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
- } else {
- frame_->Drop();
- }
- break;
- }
- // else fall through
- case ObjectLiteral::Property::PROTOTYPE: {
- frame_->Dup();
- Load(key);
- Load(value);
- if (property->emit_store()) {
- frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes
- frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- frame_->Dup();
- Load(key);
- frame_->EmitPush(Operand(Smi::FromInt(1)));
- Load(value);
- frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- frame_->Dup();
- Load(key);
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- Load(value);
- frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- }
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- Register tos = frame_->GetTOSRegister();
- // Load the function of this activation.
- __ ldr(tos, frame_->Function());
- // Load the literals array of the function.
- __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
- frame_->EmitPush(tos);
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- frame_->EmitPush(Operand(node->constant_elements()));
- int length = node->values()->length();
- if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- frame_->CallStub(&stub, 3);
- __ IncrementCounter(masm_->isolate()->counters()->cow_arrays_created_stub(),
- 1, r1, r2);
- } else if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- frame_->CallStub(&stub, 3);
- }
- frame_->EmitPush(r0); // save the result
- // r0: created object literal
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < node->values()->length(); i++) {
- Expression* value = node->values()->at(i);
-
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
-
- // The property must be set by generated code.
- Load(value);
- frame_->PopToR0();
- // Fetch the object literal.
- frame_->SpillAllButCopyTOSToR1();
-
- // Get the elements array.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ str(r0, FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array address.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- } else {
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // An initialization block consists of assignments of the form
- // expr.x = ..., so this can never be an assignment to a variable;
- // there must be a receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else {
- frame_->Dup();
- }
- frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
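-
- // JavaScript example of an initialization block, e.g. in a
- // constructor function:
- //
- //   this.a = 1;  // starts the initialization block
- //   this.b = 2;
- //   this.c = 3;  // ends the initialization block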
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame_->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else if (var != NULL) {
- LoadGlobal();
- } else {
- frame_->Dup();
- }
- EmitNamedLoad(name, var != NULL);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- // Load the receiver and swap with the value.
- Load(prop->obj());
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t0);
- frame_->EmitPush(t1);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- EmitNamedStore(name, is_contextual);
- frame_->EmitPush(r0);
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t0);
- frame_->EmitPush(t1);
- }
- frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- WriteBarrierCharacter wb_info;
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
- //
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame_->Dup2();
- EmitKeyedLoad();
- frame_->EmitPush(r0);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- wb_info = node->value()->AsLiteral() != NULL ?
- NEVER_NEWSPACE :
- (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- EmitKeyedStore(prop->key()->type(), wb_info);
- frame_->EmitPush(r0);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t1);
- frame_->EmitPush(t0);
- frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- VirtualFrame::RegisterAllocationScope scope(this);
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Assignment");
-
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Throw");
-
- Load(node->exception());
- CodeForSourcePosition(node->position());
- frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Property");
-
- { Reference property(this, node);
- property.GetValue();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Standard function call.
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare stack for call to resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->EmitPush(r0);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
- __ ldr(r1, frame_->Receiver());
- frame_->EmitPush(r1);
-
- // Push the strict mode flag.
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump();
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
- __ ldr(r1, frame_->Receiver());
- frame_->EmitPush(r1);
-
- // Push the strict mode flag.
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind();
-
- // Touch up stack with the right values for the function and the receiver.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Set up the name register and call the IC initialization code.
- __ mov(r2, Operand(var->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
- arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
-
- slow.Bind();
- // Load the function
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(var->name()));
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // r0: slot value; r1: receiver
-
- // Load the receiver.
- frame_->EmitPush(r0); // function
- frame_->EmitPush(r1); // receiver
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind();
- frame_->EmitPush(r0); // function
- LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver
- call.Bind();
- }
-
- // Call the function. At this point, everything is spilled but the
- // function and receiver are in r0 and r1.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- frame_->EmitPush(r0);
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
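-
- // JavaScript example of the pattern recognized here ('inner' stands
- // for any function in scope):
- //
- //   function outer() { return inner.apply(this, arguments); }
- //
- // No arguments object needs to be materialized for 'outer'.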
-
- } else {
- Load(property->obj()); // Receiver.
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Set the name register and call the IC initialization code.
- __ mov(r2, Operand(name));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the receiver and name of the function.
- Load(property->obj());
- Load(property->key());
-
- if (property->is_synthetic()) {
- EmitKeyedLoad();
- // Put the function below the receiver.
- // Use the global receiver.
- frame_->EmitPush(r0); // Function.
- LoadGlobalReceiver(VirtualFrame::scratch0());
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- frame_->EmitPush(r0);
- } else {
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Register key = frame_->PopToRegister();
- Register receiver = frame_->PopToRegister(key);
- frame_->EmitPush(key);
- frame_->EmitPush(receiver);
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Load the key into r2 and call the IC initialization code.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
- in_loop);
- CodeForSourcePosition(node->position());
- frame_->SpillAll();
- __ ldr(r2, frame_->ElementAt(arg_count + 1));
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- frame_->Drop(); // Drop the key still on the stack.
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- }
- }
-
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver(VirtualFrame::scratch0());
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Spill everything from here to simplify the implementation.
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // Load the argument count into r0 and the function into r1 as per
- // calling convention.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, frame_->ElementAt(arg_count));
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
- frame_->EmitPush(r0);
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- Register scratch = VirtualFrame::scratch0();
- JumpTarget null, function, leave, non_function_constructor;
-
- // Load the object into register.
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register tos = frame_->PopToRegister();
-
- // If the object is a smi, we return null.
- __ tst(tos, Operand(kSmiTagMask));
- null.Branch(eq);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
- null.Branch(lt);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
- function.Branch(eq);
-
- // Check if the constructor in the map is a function.
- __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
- __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
- non_function_constructor.Branch(ne);
-
- // The tos register now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(tos,
- FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- __ mov(tos, Operand(FACTORY->function_class_symbol()));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- __ mov(tos, Operand(FACTORY->Object_symbol()));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- __ LoadRoot(tos, Heap::kNullValueRootIndex);
- frame_->EmitPush(tos);
-
- // All done.
- leave.Bind();
-}
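
A minimal sketch of the decision tree above, assuming a simplified type model (the real code reads instance types and the constructor out of the object's map):

#include <stddef.h>

enum SimpleType { kSmiValue, kNonJSObject, kPlainJSObject, kFunction };

const char* ClassOf(SimpleType type,
                    bool constructor_is_function,
                    const char* instance_class_name) {
  if (type == kSmiValue || type == kNonJSObject) return NULL;  // class null
  if (type == kFunction) return "Function";
  if (!constructor_is_function) return "Object";
  return instance_class_name;  // from the constructor's SharedFunctionInfo
}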
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- Register scratch = VirtualFrame::scratch0();
- JumpTarget leave;
-
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register tos = frame_->PopToRegister(); // tos contains object.
- // if (object->IsSmi()) return the object.
- __ tst(tos, Operand(kSmiTagMask));
- leave.Branch(eq);
- // It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
- leave.Branch(ne);
- // Load the value.
- __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
- leave.Bind();
- frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- JumpTarget leave;
-
- ASSERT(args->length() == 2);
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Register value = frame_->PopToRegister();
- Register object = frame_->PopToRegister(value);
- // if (object->IsSmi()) return object.
- __ tst(object, Operand(kSmiTagMask));
- leave.Branch(eq);
- // It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
- leave.Branch(ne);
- // Store the value.
- __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
- // Update the write barrier.
- __ RecordWrite(object,
- Operand(JSValue::kValueOffset - kHeapObjectTag),
- scratch1,
- scratch2);
- // Leave.
- leave.Bind();
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register reg = frame_->PopToRegister();
- __ tst(reg, Operand(kSmiTagMask));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register reg = frame_->PopToRegister();
- __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
- cc_reg_ = eq;
-}
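
The single tst above folds two checks into one: with kSmiTagMask == 1 on this port, a value is a non-negative smi exactly when bit 0 (the tag) and bit 31 (the sign) are both clear. The same predicate sketched in plain C++:

#include <stdint.h>

bool IsNonNegativeSmi(uint32_t raw) {
  // eq is set by the tst above iff both masked bits are zero.
  return (raw & (1u | 0x80000000u)) == 0;
}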
-
-
-// Generates the Math.pow method.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->EmitPush(r0);
- } else {
- CpuFeatures::Scope scope(VFP3);
- JumpTarget runtime, done;
- Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Get base and exponent to registers.
- Register exponent = frame_->PopToRegister();
- Register base = frame_->PopToRegister(exponent);
- Register heap_number_map = no_reg;
-
- // Set the frame for the runtime jump target. The code below jumps to the
- // jump target label so the frame needs to be established before that.
- ASSERT(runtime.entry_frame() == NULL);
- runtime.set_entry_frame(frame_);
-
- __ JumpIfNotSmi(exponent, &exponent_nonsmi);
- __ JumpIfNotSmi(base, &base_nonsmi);
-
- heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Exponent is a smi and base is a smi. Get the smi value into vfp register
- // d1.
- __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
- __ b(&powi);
-
- __ bind(&base_nonsmi);
-    // Exponent is a smi and base is a non-smi. Get the double value from
-    // the base into vfp register d1.
- __ ObjectToDoubleVFPRegister(base, d1,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label());
-
- __ bind(&powi);
-
- // Load 1.0 into d0.
- __ vmov(d0, 1.0);
-
- // Get the absolute untagged value of the exponent and use that for the
- // calculation.
- __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
- // Negate if negative.
- __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
-
-    // Run through all the bits in the exponent. The result is calculated
-    // in d0, while d1 holds base^(2^i) at the i'th iteration.
- Label more_bits;
- __ bind(&more_bits);
- __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
- __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
- __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
- __ b(ne, &more_bits);
-
- // If exponent is positive we are done.
- __ cmp(exponent, Operand(0, RelocInfo::NONE));
- __ b(ge, &allocate_return);
-
-    // If the exponent is negative the result is 1/result (d2 already holds
-    // 1.0 in that case). However if d0 has reached infinity this will not
-    // provide the correct result, so call the runtime if that is the case.
- __ mov(scratch2, Operand(0x7FF00000));
- __ mov(scratch1, Operand(0, RelocInfo::NONE));
- __ vmov(d1, scratch1, scratch2); // Load infinity into d1.
- __ VFPCompareAndSetFlags(d0, d1);
- runtime.Branch(eq); // d0 reached infinity.
- __ vdiv(d0, d2, d0);
- __ b(&allocate_return);
-
- __ bind(&exponent_nonsmi);
-    // Special handling of raising to the power of -0.5 and 0.5. First check
-    // that the value is a heap number and that the lower 32 bits of the
-    // mantissa are zero (as they are for both values).
- heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
- __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
- __ cmp(scratch1, heap_number_map);
- runtime.Branch(ne);
- __ tst(scratch2, scratch2);
- runtime.Branch(ne);
-
- // Load the higher bits (which contains the floating point exponent).
- __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
-
- // Compare exponent with -0.5.
- __ cmp(scratch1, Operand(0xbfe00000));
-    __ b(ne, &not_minus_half);
-
- // Get the double value from the base into vfp register d0.
- __ ObjectToDoubleVFPRegister(base, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label(),
- AVOID_NANS_AND_INFINITIES);
-
- // Convert -0 into +0 by adding +0.
- __ vmov(d2, 0.0);
- __ vadd(d0, d2, d0);
- // Load 1.0 into d2.
- __ vmov(d2, 1.0);
-
- // Calculate the reciprocal of the square root.
- __ vsqrt(d0, d0);
- __ vdiv(d0, d2, d0);
-
- __ b(&allocate_return);
-
-    __ bind(&not_minus_half);
- // Compare exponent with 0.5.
- __ cmp(scratch1, Operand(0x3fe00000));
- runtime.Branch(ne);
-
- // Get the double value from the base into vfp register d0.
- __ ObjectToDoubleVFPRegister(base, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label(),
- AVOID_NANS_AND_INFINITIES);
- // Convert -0 into +0 by adding +0.
- __ vmov(d2, 0.0);
- __ vadd(d0, d2, d0);
- __ vsqrt(d0, d0);
-
- __ bind(&allocate_return);
- Register scratch3 = r5;
- __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
- heap_number_map, runtime.entry_label());
- __ mov(base, scratch3);
- done.Jump();
-
- runtime.Bind();
-
- // Push back the arguments again for the runtime call.
- frame_->EmitPush(base);
- frame_->EmitPush(exponent);
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- __ Move(base, r0);
-
- done.Bind();
- frame_->EmitPush(base);
- }
-}
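
The bit loop above is classic exponentiation by squaring. A minimal sketch in plain C++, with ordinary doubles standing in for the VFP registers d0/d1:

#include <math.h>

double PowSmallInt(double base, int exponent) {
  unsigned bits = exponent < 0 ? 0u - (unsigned)exponent : (unsigned)exponent;
  double result = 1.0;  // d0
  double power = base;  // d1: holds base^(2^i) at the i'th iteration
  while (bits != 0) {
    if (bits & 1) result *= power;  // multiply in base^(2^i) if bit i is set
    power *= power;
    bits >>= 1;
  }
  if (exponent < 0) {
    // The generated code calls the runtime when base^|n| overflows to
    // infinity, since 1/inf == 0.0 would lose the true small result.
    if (isinf(result)) return 0.0;
    result = 1.0 / result;  // d2 (1.0) divided by d0
  }
  return result;
}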
-
-
-// Generates the Math.sqrt method.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- frame_->EmitPush(r0);
- } else {
- CpuFeatures::Scope scope(VFP3);
- JumpTarget runtime, done;
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Get the value from the frame.
- Register tos = frame_->PopToRegister();
-
- // Set the frame for the runtime jump target. The code below jumps to the
- // jump target label so the frame needs to be established before that.
- ASSERT(runtime.entry_frame() == NULL);
- runtime.set_entry_frame(frame_);
-
- Register heap_number_map = r6;
- Register new_heap_number = r5;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Get the double value from the heap number into vfp register d0.
- __ ObjectToDoubleVFPRegister(tos, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label());
-
- // Calculate the square root of d0 and place result in a heap number object.
- __ vsqrt(d0, d0);
- __ AllocateHeapNumberWithValue(new_heap_number,
- d0,
- scratch1, scratch2,
- heap_number_map,
- runtime.entry_label());
- __ mov(tos, Operand(new_heap_number));
- done.Jump();
-
- runtime.Bind();
- // Push back the argument again for the runtime call.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- __ Move(tos, r0);
-
- done.Bind();
- frame_->EmitPush(tos);
- }
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
-
- Register index = frame_->PopToRegister();
- Register object = frame_->PopToRegister(index);
-
- // We need two extra registers.
- Register scratch = VirtualFrame::scratch0();
- Register result = VirtualFrame::scratch1();
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object,
- index,
- scratch,
- result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Register result = frame_->GetTOSRegister();
- Register code = frame_->PopToRegister(result);
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code, result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result_, Operand(Smi::FromInt(0)));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
-
- Register index = frame_->PopToRegister();
- Register object = frame_->PopToRegister(index);
-
- // We need three extra registers.
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- // Use r6 without notifying the virtual frame.
- Register result = r6;
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object,
- index,
- scratch1,
- scratch2,
- result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- JumpTarget answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we use XOR to get the right CC bits.
- Register possible_array = frame_->PopToRegister();
- Register scratch = VirtualFrame::scratch0();
- __ and_(scratch, possible_array, Operand(kSmiTagMask));
- __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
- answer.Branch(ne);
- // It is a heap object - get the map. Check if the object is a JS array.
- __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
- answer.Bind();
- cc_reg_ = eq;
-}
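
The and/eor pair is a smi filter arranged so that both exits share one condition code. A sketch of the flag logic, assuming this port's kSmiTag == 0 and kSmiTagMask == 1:

#include <stdint.h>

bool SmiFilterBranchTaken(uint32_t raw) {
  uint32_t t = (raw & 1u) ^ 1u;  // 1 for smis, 0 for heap objects
  // Nonzero (ne) takes the branch to 'answer' with eq clear, so smis yield
  // false; heap objects fall through to the map type check, which sets eq.
  return t != 0;
}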
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- JumpTarget answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we use XOR to get the right CC bits.
- Register possible_regexp = frame_->PopToRegister();
- Register scratch = VirtualFrame::scratch0();
- __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
- __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
- answer.Branch(ne);
- // It is a heap object - get the map. Check if the object is a regexp.
- __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
- answer.Bind();
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_object = frame_->PopToRegister();
- __ tst(possible_object, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(possible_object, ip);
- true_target()->Branch(eq);
-
- Register map_reg = VirtualFrame::scratch0();
- __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(ne);
-
- __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
- false_target()->Branch(lt);
- __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
- cc_reg_ = le;
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  //  typeof(arg) == 'function').
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- __ tst(value, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- // Check that this is an object.
- __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
- __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
- __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
- cc_reg_ = ge;
-}
-
-
-// Deferred code to check whether a String wrapper object is safe for using
-// the default valueOf behaviour. This code is called after the bit caching
-// this information in the map has been checked; on entry the map for the
-// object is in the map_result_ register. On return the register map_result_
-// contains 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ cmp(map_result_, ip);
- __ Assert(eq, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
- __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(scratch1_, ip);
- __ b(eq, &false_result);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ ldr(map_result_,
- FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch2_: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ add(scratch1_,
- map_result_,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch1_,
- scratch1_,
- Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Calculate location of the first key name.
- __ add(map_result_,
- map_result_,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
-    // The use of ip to store the valueOf symbol assumes that it is not
-    // otherwise used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_symbol()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(scratch2_, MemOperand(map_result_, 0));
- __ cmp(scratch2_, ip);
- __ b(eq, &false_result);
- __ add(map_result_, map_result_, Operand(kPointerSize));
- __ bind(&entry);
- __ cmp(map_result_, Operand(scratch1_));
- __ b(ne, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object, check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
- __ tst(scratch1_, Operand(kSmiTagMask));
- __ b(eq, &false_result);
- __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
- __ ldr(scratch2_,
- ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch2_,
- FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ ldr(scratch2_,
- ContextOperand(
- scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch1_, scratch2_);
- __ b(ne, &false_result);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
- __ orr(scratch1_,
- scratch1_,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
- __ mov(map_result_, Operand(1));
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ mov(map_result_, Operand(0, RelocInfo::NONE));
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register obj = frame_->PopToRegister(); // Pop the string wrapper.
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj);
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Register map_result = VirtualFrame::scratch0();
- __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- true_target()->Branch(ne);
-
- // We need an additional two scratch registers for the deferred code.
- Register scratch1 = VirtualFrame::scratch1();
- // Use r6 without notifying the virtual frame.
- Register scratch2 = r6;
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj, map_result, scratch1, scratch2);
- deferred->Branch(eq);
- deferred->BindExit();
- __ tst(map_result, Operand(map_result));
- cc_reg_ = ne;
-}
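
The deferred code's key scan above is a linear walk over the descriptor keys looking for the valueOf symbol. A sketch with plain C strings standing in for interned symbols:

#include <string.h>

bool HasOwnValueOf(const char* const* keys, int count) {
  for (int i = 0; i < count; i++) {
    if (strcmp(keys[i], "valueOf") == 0) return true;  // unsafe wrapper
  }
  return false;  // safe so far: fall through to the prototype check
}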
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_function = frame_->PopToRegister();
- __ tst(possible_function, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register map_reg = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_undetectable = frame_->PopToRegister();
- __ tst(possible_undetectable, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register scratch = VirtualFrame::scratch0();
- __ ldr(scratch,
- FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
- // Get the frame pointer for the calling frame.
- __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(scratch0,
- MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- cc_reg_ = eq;
-}
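
A sketch of the frame walk above, modeling frames as a linked struct; as a simplification one slot stands in for both the context slot (adaptor sentinel) and the marker slot the real code reads:

struct Frame {
  Frame* caller_fp;
  int context_or_marker;  // smi-encoded frame type sentinel
};

bool IsConstructCall(Frame* fp, int kAdaptorMarker, int kConstructMarker) {
  Frame* caller = fp->caller_fp;
  if (caller->context_or_marker == kAdaptorMarker) {
    caller = caller->caller_fp;  // skip the arguments adaptor frame
  }
  return caller->context_or_marker == kConstructMarker;
}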
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Register tos = frame_->GetTOSRegister();
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(scratch0,
- MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Get the number of formal parameters.
- __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(tos,
- MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
- eq);
-
- frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // Satisfy contract with ArgumentsAccessStub:
- // Load the key into r1 and the formal parameters count into r0.
- Load(args->at(0));
- frame_->PopToR1();
- frame_->SpillAll();
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- __ PrepareCallCFunction(0, r1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
-
- CpuFeatures::Scope scope(VFP3);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- frame_->EmitPush(r4);
- } else {
- __ mov(r0, Operand(r4));
- __ PrepareCallCFunction(1, r1);
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
- frame_->EmitPush(r0);
- }
-}
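
The VFP sequence above builds the double whose bit pattern is 0x41300000 in the high word and the random bits in the low word, i.e. 2^20 + r/2^32, then subtracts 2^20 to leave r/2^32 in [0, 1). The same trick in portable C++, assuming IEEE-754 doubles:

#include <stdint.h>
#include <string.h>

double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bit_pattern = (0x41300000ull << 32) | random_bits;
  double d;
  memcpy(&d, &bit_pattern, sizeof(d));  // 1.(20 zeros)(random bits) x 2^20
  return d - 1048576.0;                 // subtract 1.0 x 2^20
}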
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- frame_->SpillAll();
- frame_->CallStub(&stub, 2);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 2);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(4, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 4);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- RegExpConstructResultStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
- __ Push(cache_, key_);
- __ CallRuntime(Runtime::kGetFromCache, 2);
- __ Move(dst_, r0);
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Isolate::Current()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
- return;
- }
-
- Load(args->at(1));
-
- frame_->PopToR1();
- frame_->SpillAll();
-  Register key = r1;  // Just popped to r1.
- Register result = r0; // Free, as frame has just been spilled.
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
- __ ldr(scratch1,
- ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
-
- DeferredSearchCache* deferred =
- new DeferredSearchCache(result, scratch1, key);
-
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
- // result now holds finger offset as a smi.
- __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // scratch2 now points to the start of fixed array elements.
- __ ldr(result,
- MemOperand(
- scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: scratch2 now points to the key of the pair.
- __ cmp(key, result);
- deferred->Branch(ne);
-
- __ ldr(result, MemOperand(scratch2, kPointerSize));
-
- deferred->BindExit();
- frame_->EmitPush(result);
-}
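
The fast path above only probes the cache entry the finger points at; anything else is a miss handled by Runtime::kGetFromCache in the deferred code. A sketch with the cache modeled as a plain struct (the real one is a FixedArray addressed with smi-tagged offsets):

struct CacheEntry { int key; int value; };
struct ResultCache { int finger; CacheEntry entries[16]; };

bool ProbeCache(const ResultCache* cache, int key, int* value_out) {
  const CacheEntry& hit = cache->entries[cache->finger];
  if (hit.key != key) return false;  // miss: deferred runtime call
  *value_out = hit.value;            // the value sits next to its key
  return true;
}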
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 1);
- frame_->EmitPush(r0);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- Register index2 = r2;
- Register index1 = r1;
- Register object = r0;
- Register tmp1 = r3;
- Register tmp2 = r4;
-
- frame_->EmitPop(index2);
- frame_->EmitPop(index1);
- frame_->EmitPop(object);
-
- DeferredSwapElements* deferred =
- new DeferredSwapElements(object, index1, index2);
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
- deferred->Branch(ne);
- __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
- __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(ne);
-
- // Check the object's elements are in fast case and writable.
- __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(tmp2, ip);
- deferred->Branch(ne);
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // Check that both indices are smis.
- __ mov(tmp2, index1);
- __ orr(tmp2, tmp2, index2);
- __ tst(tmp2, Operand(kSmiTagMask));
- deferred->Branch(ne);
-
- // Check that both indices are valid.
- __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
- __ cmp(tmp2, index1);
- __ cmp(tmp2, index2, hi);
- deferred->Branch(ls);
-
-  // Convert index1 and index2 from smi indices into byte offsets into the
-  // fixed array held in tmp1.
- __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Swap elements.
- Register tmp3 = object;
- object = no_reg;
- __ ldr(tmp3, MemOperand(tmp1, index1));
- __ ldr(tmp2, MemOperand(tmp1, index2));
- __ str(tmp3, MemOperand(tmp1, index2));
- __ str(tmp2, MemOperand(tmp1, index1));
-
- Label done;
- __ InNewSpace(tmp1, tmp2, eq, &done);
-  // Possible optimization: do a check that both values are smis
-  // (OR them together and test the result against the smi mask).
-
- __ mov(tmp2, tmp1);
- __ add(index1, index1, tmp1);
- __ add(index2, index2, tmp1);
- __ RecordWriteHelper(tmp1, index1, tmp3);
- __ RecordWriteHelper(tmp2, index2, tmp3);
- __ bind(&done);
-
- deferred->BindExit();
- __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(tmp1);
-}
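
The offset arithmetic above exploits the smi encoding: with 4-byte pointers (kPointerSizeLog2 == 2) and a 1-bit smi tag, a smi index is already 2 * i, so a single extra left shift scales it to the byte offset 4 * i. A worked sketch:

int ElementByteOffset(int smi_index /* == 2 * i */) {
  const int kFixedArrayHeaderSize = 8;  // assumption: 32-bit heap layout
  const int kHeapObjectTag = 1;         // subtracted because tmp1 is tagged
  return (kFixedArrayHeaderSize - kHeapObjectTag) + (smi_index << 1);
}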
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- frame_->CallJSFunction(n_args);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_sin, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_cos, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_log, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Register lhs = frame_->PopToRegister();
- Register rhs = frame_->PopToRegister(lhs);
- __ cmp(lhs, rhs);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Register right = frame_->PopToRegister();
- Register left = frame_->PopToRegister(right);
- Register tmp = frame_->scratch0();
- Register tmp2 = frame_->scratch1();
-
- // Jumps to done must have the eq flag set if the test is successful
- // and clear if the test has failed.
- Label done;
-
- // Fail if either is a non-HeapObject.
- __ cmp(left, Operand(right));
- __ b(eq, &done);
- __ and_(tmp, left, Operand(right));
- __ eor(tmp, tmp, Operand(kSmiTagMask));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(ne, &done);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &done);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &done);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ bind(&done);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- Register tmp = frame_->scratch0();
- __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
- __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
-
- __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
- __ IndexFromHash(value, value);
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- if (CheckForInlineRuntimeCall(node)) {
- ASSERT((has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- const Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Prepare stack for calling JS runtime function.
- // Push the builtins object found in the current global object.
- Register scratch = VirtualFrame::scratch0();
- __ ldr(scratch, GlobalObjectOperand());
- Register builtins = frame_->GetTOSRegister();
- __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
- frame_->EmitPush(builtins);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- if (function == NULL) {
- // Call the JS runtime function.
- __ mov(r2, Operand(node->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- } else {
- // Call the C runtime function.
- frame_->CallRuntime(function, arg_count);
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- LoadCondition(node->expression(), false_target(), true_target(), true);
- // LoadCondition may (and usually does) leave a test and branch to
- // be emitted by the caller. In that case, negate the condition.
- if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
- frame_->EmitPush(r0);
-
- } else if (variable != NULL) {
-      // Delete of an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->EmitPush(Operand(variable->name()));
- frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
- frame_->EmitPush(r0);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Delete from the context holding the named variable.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(variable->name()));
- frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->EmitPush(r0);
-
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- }
-
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->Drop();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->EmitPush(r0); // r0 has result
-
- } else {
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- frame_->PopToR0();
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0); // r0 has result
- break;
- }
-
- case Token::BIT_NOT: {
- Register tos = frame_->PopToRegister();
- JumpTarget not_smi_label;
- JumpTarget continue_label;
- // Smi check.
- __ tst(tos, Operand(kSmiTagMask));
- not_smi_label.Branch(ne);
-
- __ mvn(tos, Operand(tos));
- __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
- frame_->EmitPush(tos);
- // The fast case is the first to jump to the continue label, so it gets
- // to decide the virtual frame layout.
- continue_label.Jump();
-
- not_smi_label.Bind();
- frame_->SpillAll();
- __ Move(r0, tos);
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
-
- continue_label.Bind();
- break;
- }
-
- case Token::VOID:
- frame_->Drop();
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
- break;
-
- case Token::ADD: {
- Register tos = frame_->Peek();
- // Smi check.
- JumpTarget continue_label;
- __ tst(tos, Operand(kSmiTagMask));
- continue_label.Branch(eq);
-
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
- frame_->EmitPush(r0);
-
- continue_label.Bind();
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
- DeferredCountOperation(Register value,
- bool is_increment,
- bool is_postfix,
- int target_size)
- : value_(value),
- is_increment_(is_increment),
- is_postfix_(is_postfix),
- target_size_(target_size) {}
-
- virtual void Generate() {
- VirtualFrame copied_frame(*frame_state()->frame());
-
- Label slow;
- // Check for smi operand.
- __ tst(value_, Operand(kSmiTagMask));
- __ b(ne, &slow);
-
- // Revert optimistic increment/decrement.
- if (is_increment_) {
- __ sub(value_, value_, Operand(Smi::FromInt(1)));
- } else {
- __ add(value_, value_, Operand(Smi::FromInt(1)));
- }
-
- // Slow case: Convert to number. At this point the
-    // value to be incremented is in the value register.
- __ bind(&slow);
-
- // Convert the operand to a number.
- copied_frame.EmitPush(value_);
-
- copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-
- if (is_postfix_) {
- // Postfix: store to result (on the stack).
- __ str(r0, MemOperand(sp, target_size_ * kPointerSize));
- }
-
- copied_frame.EmitPush(r0);
- copied_frame.EmitPush(Operand(Smi::FromInt(1)));
-
- if (is_increment_) {
- copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- copied_frame.CallRuntime(Runtime::kNumberSub, 2);
- }
-
- __ Move(value_, r0);
-
- copied_frame.MergeTo(frame_state()->frame());
- }
-
- private:
- Register value_;
- bool is_increment_;
- bool is_postfix_;
- int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CountOperation");
- VirtualFrame::RegisterAllocationScope scope(this);
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
- bool is_slot = (var != NULL && var->mode() == Variable::VAR);
-
- if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
- // The type info declares that this variable is always a Smi. That
- // means it is a Smi both before and after the increment/decrement.
-    // Let's make use of that to emit a very minimal count operation.
- Reference target(this, node->expression(), !is_const);
- ASSERT(!target.is_illegal());
- target.GetValue(); // Pushes the value.
- Register value = frame_->PopToRegister();
- if (is_postfix) frame_->EmitPush(value);
- if (is_increment) {
- __ add(value, value, Operand(Smi::FromInt(1)));
- } else {
- __ sub(value, value, Operand(Smi::FromInt(1)));
- }
- frame_->EmitPush(value);
- target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
- if (is_postfix) frame_->Pop();
- ASSERT_EQ(original_height + 1, frame_->height());
- return;
- }
-
- // If it's a postfix expression and its result is not ignored and the
- // reference is non-trivial, then push a placeholder on the stack now
- // to hold the result of the expression.
- bool placeholder_pushed = false;
- if (!is_slot && is_postfix) {
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- placeholder_pushed = true;
- }
-
- // A constant reference is not saved to, so a constant reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
- ASSERT_EQ(original_height + 1, frame_->height());
- return;
- }
-
-    // This pushes 0, 1 or 2 words on the stack to be used later when
-    // updating the target. It also pushes the current value of the target.
- target.GetValue();
-
- bool value_is_known_smi = frame_->KnownSmiAt(0);
- Register value = frame_->PopToRegister();
-
- // Postfix: Store the old value as the result.
- if (placeholder_pushed) {
- frame_->SetElementAt(value, target.size());
- } else if (is_postfix) {
- frame_->EmitPush(value);
- __ mov(VirtualFrame::scratch0(), value);
- value = VirtualFrame::scratch0();
- }
-
- // We can't use any type information here since the virtual frame from the
- // deferred code may have lost information and we can't merge a virtual
- // frame with less specific type knowledge to a virtual frame with more
- // specific knowledge that has already used that specific knowledge to
- // generate code.
- frame_->ForgetTypeInfo();
-
- // The constructor here will capture the current virtual frame and use it to
- // merge to after the deferred code has run. No virtual frame changes are
- // allowed from here until the 'BindExit' below.
- DeferredCode* deferred =
- new DeferredCountOperation(value,
- is_increment,
- is_postfix,
- target.size());
- if (!value_is_known_smi) {
- // Check for smi operand.
- __ tst(value, Operand(kSmiTagMask));
-
- deferred->Branch(ne);
- }
-
- // Perform optimistic increment/decrement.
- if (is_increment) {
- __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
- } else {
- __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
- }
-
- // If increment/decrement overflows, go to deferred code.
- deferred->Branch(vs);
-
- deferred->BindExit();
-
- // Store the new value in the target if not const.
- // At this point the answer is in the value register.
- frame_->EmitPush(value);
- // Set the target with the result, leaving the result on
- // top of the stack. Removes the target from the stack if
- // it has a non-zero size.
- if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
- }
-
- // Postfix: Discard the new value and use the old.
- if (is_postfix) frame_->Pop();
- ASSERT_EQ(original_height + 1, frame_->height());
-}
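
The core of the fast path above is an optimistic smi add/sub whose overflow sends control to the deferred code (which first reverts the adjustment). A sketch assuming 32-bit smis (value << 1, tag bit 0) and the GCC/Clang overflow builtins:

#include <stdint.h>

bool OptimisticCount(int32_t* value, bool is_increment) {
  if (*value & 1) return false;    // not a smi: take the deferred path
  const int32_t kSmiOne = 1 << 1;  // Smi::FromInt(1)
  int32_t result;
  bool overflow = is_increment
      ? __builtin_add_overflow(*value, kSmiOne, &result)
      : __builtin_sub_overflow(*value, kSmiOne, &result);
  if (overflow) return false;      // vs set: deferred code handles it
  *value = result;                 // still a valid smi
  return true;
}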
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not in
- // the CC register), we force the right hand side to do the
- // same. This is necessary because we may have to branch to the exit
- // after evaluating the left hand side (due to the shortcut
- // semantics), but the compiler must (statically) know if the result
- // of compiling the binary operation is materialized or not.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- LoadCondition(node->left(), &is_true, false_target(), false);
- if (has_valid_frame() && !has_cc()) {
- // The left-hand side result is on top of the virtual frame.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- frame_->Dup();
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- ToBoolean(&pop_and_continue, &exit);
- Branch(false, &exit);
-
- // Pop the result of evaluating the first part.
- pop_and_continue.Bind();
- frame_->Pop();
-
- // Evaluate right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- } else if (has_cc() || is_true.is_linked()) {
- // The left-hand side is either (a) partially compiled to
- // control flow with a final branch left to emit or (b) fully
- // compiled to control flow and possibly true.
- if (has_cc()) {
- Branch(false, false_target());
- }
- is_true.Bind();
- LoadCondition(node->right(), true_target(), false_target(), false);
- } else {
- // Nothing to do.
- ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- LoadCondition(node->left(), true_target(), &is_false, false);
- if (has_valid_frame() && !has_cc()) {
- // The left-hand side result is on top of the virtual frame.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- frame_->Dup();
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- ToBoolean(&exit, &pop_and_continue);
- Branch(true, &exit);
-
- // Pop the result of evaluating the first part.
- pop_and_continue.Bind();
- frame_->Pop();
-
- // Evaluate right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- } else if (has_cc() || is_false.is_linked()) {
- // The left-hand side is either (a) partially compiled to
- // control flow with a final branch left to emit or (b) fully
- // compiled to control flow and possibly false.
- if (has_cc()) {
- Branch(true, true_target());
- }
- is_false.Bind();
- LoadCondition(node->right(), true_target(), false_target(), false);
- } else {
- // Nothing to do.
- ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
- }
- }
-}
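Concretely, the Dup/ToBoolean/Pop sequence above implements value-preserving
short-circuiting. A sketch of the same semantics in plain C++, with
illustrative names (load_left stands in for evaluating the left expression):

    #include <functional>

    template <typename Value>
    Value LogicalAnd(const std::function<Value()>& load_left,
                     const std::function<Value()>& load_right,
                     const std::function<bool(const Value&)>& to_boolean) {
      Value left = load_left();            // frame_->Dup() keeps a copy as the result.
      if (!to_boolean(left)) return left;  // Branch(false, &exit): left is the answer.
      return load_right();                 // pop_and_continue: drop left, load right.
    }

The || case is the mirror image: the left value is kept when it converts to
true, and the right expression is evaluated otherwise.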
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // Optimize for the case where (at least) one of the expressions
- // is a literal small integer.
- Literal* lliteral = node->left()->AsLiteral();
- Literal* rliteral = node->right()->AsLiteral();
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- bool overwrite_left = node->left()->ResultOverwriteAllowed();
- bool overwrite_right = node->right()->ResultOverwriteAllowed();
-
- if (rliteral != NULL && rliteral->handle()->IsSmi()) {
- VirtualFrame::RegisterAllocationScope scope(this);
- Load(node->left());
- if (frame_->KnownSmiAt(0)) overwrite_left = false;
- SmiOperation(node->op(),
- rliteral->handle(),
- false,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
- } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
- VirtualFrame::RegisterAllocationScope scope(this);
- Load(node->right());
- if (frame_->KnownSmiAt(0)) overwrite_right = false;
- SmiOperation(node->op(),
- lliteral->handle(),
- true,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (lliteral != NULL) {
- ASSERT(!lliteral->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- if (rliteral != NULL) {
- ASSERT(!rliteral->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- VirtualFrame::RegisterAllocationScope scope(this);
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (overwrite_left) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (overwrite_right) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
- Load(node->left());
- Load(node->right());
- GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
- }
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- frame_->EmitPush(MemOperand(frame_->Function()));
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CompareOperation");
-
- VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
-
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
- // Load the operand, move it to a register.
- LoadTypeofExpression(operation->expression());
- Register tos = frame_->PopToRegister();
-
- Register scratch = VirtualFrame::scratch0();
-
- if (check->Equals(HEAP->number_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- true_target()->Branch(eq);
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(tos, ip);
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->string_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-
- // It can be an undetectable string object.
- __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(eq);
-
- __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- cc_reg_ = lt;
-
- } else if (check->Equals(HEAP->boolean_symbol())) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->undefined_symbol())) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->function_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register map_reg = scratch;
- __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
- true_target()->Branch(eq);
- // Regular expressions are callable so typeof == 'function'.
- __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
- cc_reg_ = eq;
-
- } else if (check->Equals(HEAP->object_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
-
- Register map_reg = scratch;
- __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(eq);
-
- __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
- false_target()->Branch(lt);
- __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
- cc_reg_ = le;
-
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- false_target()->Jump();
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height));
- return;
- }
-
- switch (op) {
- case Token::EQ:
- Comparison(eq, left, right, false);
- break;
-
- case Token::LT:
- Comparison(lt, left, right);
- break;
-
- case Token::GT:
- Comparison(gt, left, right);
- break;
-
- case Token::LTE:
- Comparison(le, left, right);
- break;
-
- case Token::GTE:
- Comparison(ge, left, right);
- break;
-
- case Token::EQ_STRICT:
- Comparison(eq, left, right, true);
- break;
-
- case Token::IN: {
- Load(left);
- Load(right);
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
- frame_->EmitPush(r0);
- break;
- }
-
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- frame_->CallStub(&stub, 2);
- // At this point if instanceof succeeded then r0 == 0.
- __ tst(r0, Operand(r0));
- cc_reg_ = eq;
- break;
- }
-
- default:
- UNREACHABLE();
- }
- ASSERT((has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
-
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- if (!node->is_strict()) {
- true_target()->Branch(eq);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, Operand(ip));
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- }
-
- cc_reg_ = eq;
- ASSERT(has_cc() && frame_->height() == original_height);
-}
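The typeof checks above and CompareToNull share the undetectable-object test:
hosts expose objects (the classic example is document.all) that must behave
like undefined, and the object's map records this in a bit field. A sketch of
the and_/cmp pair, with an assumed bit position:

    #include <cstdint>

    const int kIsUndetectableBit = 4;  // Illustrative; the real bit lives in Map.

    bool IsUndetectable(uint8_t map_bit_field) {
      // and_(scratch, bits, Operand(1 << bit)); cmp(scratch, Operand(1 << bit))
      return (map_bit_field & (1 << kIsUndetectableBit)) ==
             (1 << kIsUndetectableBit);
    }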
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetNamedValue(Register receiver,
- Handle<String> name,
- bool is_contextual)
- : receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-// The convention here is that on entry the receiver is in a register that
-// is not in use by the virtual frame.  On exit the answer is found in that
-// same register and the stack has the same height.
-void DeferredReferenceGetNamedValue::Generate() {
-#ifdef DEBUG
- int expected_height = frame_state()->frame()->height();
-#endif
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
- __ DecrementCounter(masm_->isolate()->counters()->named_load_inline(),
- 1, scratch1, scratch2);
- __ IncrementCounter(masm_->isolate()->counters()->named_load_inline_miss(),
- 1, scratch1, scratch2);
-
- // Ensure receiver in r0 and name in r2 to match load ic calling convention.
- __ Move(r0, receiver_);
- __ mov(r2, Operand(name_));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ Call(ic, mode);
- // We must mark the code just after the call with the correct marker.
- MacroAssembler::NopMarkerTypes code_marker;
- if (is_contextual_) {
- code_marker = is_dont_delete_
- ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
- : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
- } else {
- code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
- }
- __ MarkCode(code_marker);
-
- // At this point the answer is in r0. We move it to the expected register
- // if necessary.
- __ Move(receiver_, r0);
-
- // Now go back to the frame that we entered with. This will not overwrite
- // the receiver register since that register was not in use when we came
- // in. The instructions emitted by this merge are skipped over by the
- // inline load patching mechanism when looking for the branch instruction
- // that tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
- ASSERT_EQ(expected_height, frame_state()->frame()->height());
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceGetKeyedValue(Register key, Register receiver)
- : key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register key_;
- Register receiver_;
-};
-
-
-// Takes the key and receiver in r0 and r1, or vice versa.  Returns the
-// result in r0.
-void DeferredReferenceGetKeyedValue::Generate() {
- ASSERT((key_.is(r0) && receiver_.is(r1)) ||
- (key_.is(r1) && receiver_.is(r0)));
-
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
- 1, scratch1, scratch2);
- __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline_miss(),
- 1, scratch1, scratch2);
-
- // Ensure key in r0 and receiver in r1 to match keyed load ic calling
- // convention.
- if (key_.is(r1)) {
- __ Swap(r0, r1, ip);
- }
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // keyed load has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Now go back to the frame that we entered with. This will not overwrite
- // the receiver or key registers since they were not in use when we came
- // in. The instructions emitted by this merge are skipped over by the
- // inline load patching mechanism when looking for the branch instruction
- // that tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
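Matching the IC's fixed register convention sometimes requires a swap, and
__ Swap(r0, r1, ip) above exchanges r0 and r1 through the ip scratch
register. The equivalent three-move exchange in C++ terms:

    // Sketch of "__ Swap(r0, r1, ip)":
    // mov ip, r0; mov r0, r1; mov r1, ip.
    void Swap(int& a, int& b, int& scratch) {
      scratch = a;
      a = b;
      b = scratch;
    }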
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
- 1, scratch1, scratch2);
- __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline_miss(),
- 1, scratch1, scratch2);
-
- // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
- // calling convention.
- if (value_.is(r1)) {
- __ Swap(r0, r1, ip);
- }
- ASSERT(receiver_.is(r2));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed store IC. It has the arguments value, key and receiver in r0,
- // r1 and r2.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode_ == kStrictMode)
- ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // keyed store has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
- DeferredReferenceSetNamedValue(Register value,
- Register receiver,
- Handle<String> name,
- StrictModeFlag strict_mode)
- : value_(value),
- receiver_(receiver),
- name_(name),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetNamedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register receiver_;
- Handle<String> name_;
- StrictModeFlag strict_mode_;
-};
-
-
-// Takes value in r0, receiver in r1 and returns the result (the
-// value) in r0.
-void DeferredReferenceSetNamedValue::Generate() {
- // Record the entry frame and spill.
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- // Ensure value in r0, receiver in r1 to match store ic calling
- // convention.
- ASSERT(value_.is(r0) && receiver_.is(r1));
- __ mov(r2, Operand(name_));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call the named store IC.  It takes the value, receiver and name in
-    // r0, r1 and r2.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode_ == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // named store has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Go back to the frame we entered with. The instructions
- // generated by this merge are skipped over by the inline store
- // patching mechanism when looking for the branch instruction that
- // tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-// Consumes the top of stack (the receiver) and pushes the result instead.
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
- bool contextual_load_in_builtin =
- is_contextual &&
- (ISOLATE->bootstrapper()->IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
- Comment cmnt(masm(), "[ Load from named Property");
-    // Set up the name register and call the load IC.
- frame_->CallLoadIC(name,
- is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET);
- frame_->EmitPush(r0); // Push answer.
- } else {
- // Inline the in-object property case.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- if (is_contextual) {
- __ IncrementCounter(
- masm_->isolate()->counters()->named_load_global_inline(),
- 1, frame_->scratch0(), frame_->scratch1());
- } else {
- __ IncrementCounter(masm_->isolate()->counters()->named_load_inline(),
- 1, frame_->scratch0(), frame_->scratch1());
- }
-
-    // The following instructions are the inlined load of an in-object
-    // property.  Parts of this code are patched, so the exact instructions
-    // generated need to be fixed.  Therefore the constant pool is blocked
-    // while generating this code.
-
- // Load the receiver from the stack.
- Register receiver = frame_->PopToRegister();
-
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
-
- bool is_dont_delete = false;
- if (is_contextual) {
- if (!info_->closure().is_null()) {
- // When doing lazy compilation we can check if the global cell
- // already exists and use its "don't delete" status as a hint.
- AssertNoAllocation no_gc;
- v8::internal::GlobalObject* global_object =
- info_->closure()->context()->global();
- LookupResult lookup;
- global_object->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
- ASSERT(lookup.holder() == global_object);
- ASSERT(global_object->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
- is_dont_delete = lookup.IsDontDelete();
- }
- }
- if (is_dont_delete) {
- __ IncrementCounter(
- masm_->isolate()->counters()->dont_delete_hint_hit(),
- 1, frame_->scratch0(), frame_->scratch1());
- }
- }
-
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
- }
-
- // Check for the_hole_value if necessary.
- // Below we rely on the number of instructions generated, and we can't
- // cope with the Check macro which does not generate a fixed number of
- // instructions.
- Label skip, check_the_hole, cont;
- if (FLAG_debug_code && is_contextual && is_dont_delete) {
- __ b(&skip);
- __ bind(&check_the_hole);
- __ Check(ne, "DontDelete cells can't contain the hole");
- __ b(&cont);
- __ bind(&skip);
- }
-
-#ifdef DEBUG
- int InlinedNamedLoadInstructions = 5;
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
-
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Check the map. The null map used below is patched by the inline cache
- // code. Therefore we can't use a LoadRoot call.
- __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(scratch2, Operand(FACTORY->null_value()));
- __ cmp(scratch, scratch2);
- deferred->Branch(ne);
-
- if (is_contextual) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 1;
-#endif
- // Load the (initially invalid) cell and get its value.
- masm()->mov(receiver, Operand(FACTORY->null_value()));
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
-
- deferred->set_is_dont_delete(is_dont_delete);
-
- if (!is_dont_delete) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 3;
-#endif
- __ cmp(receiver, Operand(FACTORY->the_hole_value()));
- deferred->Branch(eq);
- } else if (FLAG_debug_code) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 3;
-#endif
- __ cmp(receiver, Operand(FACTORY->the_hole_value()));
- __ b(&check_the_hole, eq);
- __ bind(&cont);
- }
- } else {
- // Initially use an invalid index. The index will be patched by the
- // inline cache code.
- __ ldr(receiver, MemOperand(receiver, 0));
- }
-
-      // Make sure that the expected number of instructions is generated.
-      // If the code above is changed, the offsets in ic-arm.cc
-      // (LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad) need
-      // to be updated as well.
- ASSERT_EQ(InlinedNamedLoadInstructions,
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- deferred->BindExit();
- // At this point the receiver register has the result, either from the
- // deferred code or from the inlined code.
- frame_->EmitPush(receiver);
- }
-}
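The fast case above is a patchable template: the null value stands in both
for the receiver's map and, in the contextual case, for the global property
cell, and the IC miss handler later rewrites those immediates in the
instruction stream. That is why the constant pool is blocked and the
instruction count asserted. A conceptual sketch, not V8 code, with the
patched slots modeled as mutable globals:

    struct ObjectSketch { const void* map; };

    static const void* patched_map = nullptr;  // The "null map", patched later.
    static int patched_offset = 0;             // Invalid until patched.

    const void* InlinedNamedLoad(const ObjectSketch* receiver, bool* miss) {
      if (receiver->map != patched_map) {  // cmp scratch, scratch2
        *miss = true;                      // deferred->Branch(ne): take the IC path.
        return nullptr;
      }
      *miss = false;
      const char* raw = reinterpret_cast<const char*>(receiver);
      return *reinterpret_cast<const void* const*>(raw + patched_offset);
    }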
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
-
- // Get the value and receiver from the stack.
- frame()->PopToR0();
- Register value = r0;
- frame()->PopToR1();
- Register receiver = r1;
-
- DeferredReferenceSetNamedValue* deferred =
- new DeferredReferenceSetNamedValue(
- value, receiver, name, strict_mode_flag());
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
-    // The following instructions are part of the inlined in-object
-    // property store code which can be patched.  Therefore the exact
-    // number of instructions generated must be fixed, so the constant
-    // pool is blocked while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
-
- // Check the map. Initially use an invalid map to force a
- // failure. The map check will be patched in the runtime system.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
- __ mov(scratch0, Operand(FACTORY->null_value()));
- __ cmp(scratch0, scratch1);
- deferred->Branch(ne);
-
- int offset = 0;
- __ str(value, MemOperand(receiver, offset));
-
- // Update the write barrier and record its size. We do not use
- // the RecordWrite macro here because we want the offset
- // addition instruction first to make it easy to patch.
- Label record_write_start, record_write_done;
- __ bind(&record_write_start);
- // Add offset into the object.
- __ add(scratch0, receiver, Operand(offset));
- // Test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- __ InNewSpace(receiver, scratch1, eq, &record_write_done);
- // Record the actual write.
- __ RecordWriteHelper(receiver, scratch0, scratch1);
- __ bind(&record_write_done);
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
- __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
- }
- // Check that this is the first inlined write barrier or that
- // this inlined write barrier has the same size as all the other
- // inlined write barriers.
- ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
- (Isolate::Current()->inlined_write_barrier_size() ==
- masm()->InstructionsGeneratedSince(&record_write_start)));
- Isolate::Current()->set_inlined_write_barrier_size(
- masm()->InstructionsGeneratedSince(&record_write_start));
-
-      // Make sure that the expected number of instructions is generated.
- ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
- masm()->InstructionsGeneratedSince(&check_inlined_codesize));
- }
- deferred->BindExit();
- }
- ASSERT_EQ(expected_height, frame()->height());
-}
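The hand-rolled write barrier above begins with the InNewSpace test, which
assumes new space is a single region aligned to its size, so one mask and
compare classifies any pointer. A sketch under that assumption:

    #include <cstdint>

    // new_space_start and new_space_mask correspond to the external
    // references used by the generated code.
    bool InNewSpaceSketch(uintptr_t addr,
                          uintptr_t new_space_start,
                          uintptr_t new_space_mask) {
      return (addr & new_space_mask) == new_space_start;
    }

Receivers already in new space branch straight to record_write_done,
matching the comment above: region marks are not kept for new space pages.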
-
-
-void CodeGenerator::EmitKeyedLoad() {
- if (loop_nesting() == 0) {
- Comment cmnt(masm_, "[ Load from keyed property");
- frame_->CallKeyedLoadIC();
- } else {
- // Inline the keyed load.
- Comment cmnt(masm_, "[ Inlined load from keyed property");
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
- 1, frame_->scratch0(), frame_->scratch1());
-
- // Load the key and receiver from the stack.
- bool key_is_known_smi = frame_->KnownSmiAt(0);
- Register key = frame_->PopToRegister();
- Register receiver = frame_->PopToRegister(key);
-
- // The deferred code expects key and receiver in registers.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(key, receiver);
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
-    // The following instructions are part of the inlined keyed property
-    // load code which can be patched.  Therefore the exact number of
-    // instructions generated needs to be fixed, so the constant pool is
-    // blocked while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- // Check the map. The null map used below is patched by the inline cache
- // code.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the key is a smi.
- if (!key_is_known_smi) {
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
-
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
- __ mov(scratch2, Operand(FACTORY->null_value()));
- __ cmp(scratch1, scratch2);
- deferred->Branch(ne);
-
- // Get the elements array from the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch1);
-
- // Check that key is within bounds. Use unsigned comparison to handle
- // negative keys.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ cmp(scratch2, key);
-      deferred->Branch(ls);  // Unsigned lower or same, i.e. length <= key.
-
- // Load and check that the result is not the hole (key is a smi).
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ add(scratch1,
- scratch1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch1,
- MemOperand(scratch1, key, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
- __ cmp(scratch1, scratch2);
- deferred->Branch(eq);
-
- __ mov(r0, scratch1);
-      // Make sure that the expected number of instructions is generated.
- ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- deferred->BindExit();
- }
-}
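The bounds check above folds the negative-key and too-large-key tests into a
single comparison by treating the signed key as unsigned. The same trick in
portable C++:

    #include <cstdint>

    bool KeyInBounds(int32_t key, int32_t length) {
      // A negative key wraps to a huge unsigned value, so one unsigned
      // compare rejects both negative and out-of-range indices; this is
      // the cmp/Branch(ls) pair above, inverted.
      return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
    }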
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
- WriteBarrierCharacter wb_info) {
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- // Inline the keyed store.
- Comment cmnt(masm_, "[ Inlined store to keyed property");
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- Register scratch3 = r3;
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
- 1, scratch1, scratch2);
-
-
- // Load the value, key and receiver from the stack.
- bool value_is_harmless = frame_->KnownSmiAt(0);
- if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
- bool key_is_smi = frame_->KnownSmiAt(1);
- Register value = frame_->PopToRegister();
- Register key = frame_->PopToRegister(value);
- VirtualFrame::SpilledScope spilled(frame_);
- Register receiver = r2;
- frame_->EmitPop(receiver);
-
-#ifdef DEBUG
- bool we_remembered_the_write_barrier = value_is_harmless;
-#endif
-
- // The deferred code expects value, key and receiver in registers.
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(
- value, key, receiver, strict_mode_flag());
-
-    // Check that the value is a smi.  As this inlined code does not update
-    // the write barrier, it is only safe to store smi values.
- if (!value_is_harmless) {
- // If the value is not likely to be a Smi then let's test the fixed array
- // for new space instead. See below.
- if (wb_info == LIKELY_SMI) {
- __ tst(value, Operand(kSmiTagMask));
- deferred->Branch(ne);
-#ifdef DEBUG
- we_remembered_the_write_barrier = true;
-#endif
- }
- }
-
- if (!key_is_smi) {
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // Check that the receiver is a JSArray.
- __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
- deferred->Branch(ne);
-
- // Get the elements array from the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (!value_is_harmless && wb_info != LIKELY_SMI) {
- Label ok;
- __ and_(scratch2,
- scratch1,
- Operand(ExternalReference::new_space_mask(isolate())));
- __ cmp(scratch2, Operand(ExternalReference::new_space_start(isolate())));
- __ tst(value, Operand(kSmiTagMask), ne);
- deferred->Branch(ne);
-#ifdef DEBUG
- we_remembered_the_write_barrier = true;
-#endif
- }
- // Check that the elements array is not a dictionary.
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
-
-    // The following instructions are part of the inlined keyed property
-    // store code which can be patched.  Therefore the exact number of
-    // instructions generated needs to be fixed, so the constant pool is
-    // blocked while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
-
- // Read the fixed array map from the constant pool (not from the root
- // array) so that the value can be patched. When debugging, we patch this
- // comparison to always fail so that we will hit the IC call in the
- // deferred code which will allow the debugger to break for fast case
- // stores.
- __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
- __ cmp(scratch2, scratch3);
- deferred->Branch(ne);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis (because the fixed array check above ensures the
- // elements are in fast case). Use unsigned comparison to handle negative
- // keys.
- __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(scratch3, key);
-      deferred->Branch(ls);  // Unsigned lower or same, i.e. length <= key.
-
- // Store the value.
- __ add(scratch1, scratch1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(value,
- MemOperand(scratch1, key, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-
-      // Make sure that the expected number of instructions is generated.
- ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- ASSERT(we_remembered_the_write_barrier);
-
- deferred->BindExit();
- } else {
- frame()->CallKeyedStoreIC(strict_mode_flag());
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::DupIfPersist() {
- if (persist_after_get_) {
- switch (type_) {
- case KEYED:
- cgen_->frame()->Dup2();
- break;
- case NAMED:
- cgen_->frame()->Dup();
- // Fall through.
- case UNLOADED:
- case ILLEGAL:
- case SLOT:
- // Do nothing.
- ;
- }
- } else {
- set_unloaded();
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- DupIfPersist();
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- Handle<String> name = GetName();
- DupIfPersist();
- cgen_->EmitNamedLoad(name, is_global);
- break;
- }
-
- case KEYED: {
- ASSERT(property != NULL);
- DupIfPersist();
- cgen_->EmitKeyedLoad();
- cgen_->frame()->EmitPush(r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- VirtualFrame* frame = cgen_->frame();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- cgen_->EmitNamedStore(GetName(), false);
- frame->EmitPush(r0);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression_->AsProperty();
- ASSERT(property != NULL);
- cgen_->CodeForSourcePosition(property->position());
- cgen_->EmitKeyedStore(property->key()->type(), wb_info);
- frame->EmitPush(r0);
- set_unloaded();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s",
- op_name,
- overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : "",
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
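For reference, the formatting above yields names of the shape
GenericBinaryOpStub_<op>_<overwrite>[_ConstantRhs]_<ic type>. A
self-contained sketch with assumed placeholder strings (the real values come
from Token::Name and BinaryOpIC::GetName):

    #include <cstdio>

    int main() {
      char name[100];
      snprintf(name, sizeof(name), "GenericBinaryOpStub_%s_%s%s_%s",
               "ADD",            // Assumed result of Token::Name(op_)
               "OverwriteLeft",  // mode_ == OVERWRITE_LEFT
               "_ConstantRhs",   // specialized_on_rhs_
               "UNINIT");        // Assumed BinaryOpIC type name
      puts(name);  // GenericBinaryOpStub_ADD_OverwriteLeft_ConstantRhs_UNINIT
      return 0;
    }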
-
-#undef __
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 9b1f103..01aa805 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,162 +37,8 @@
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important; see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
-  // This is in preparation for something that uses the reference on the
-  // stack.  If we need this reference after the get, dup it now; otherwise
-  // mark it as unloaded.
- inline void DupIfPersist();
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
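The Type values double as stack footprints, which is what the "values of the
types are important" remark refers to. A sketch of size()'s contract, using
plain ints for illustration:

    // UNLOADED (-2) and ILLEGAL (-1) occupy nothing, SLOT (0) occupies no
    // words, NAMED (1) keeps the receiver, and KEYED (2) keeps both the
    // receiver and the key on the stack.
    int ReferenceSize(int type) { return type < 0 ? 0 : type; }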
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- virtual ~CodeGenState();
-
- virtual JumpTarget* true_target() const { return NULL; }
- virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
- inline CodeGenerator* owner() { return owner_; }
- inline CodeGenState* previous() const { return previous_; }
-
- private:
- CodeGenerator* owner_;
- CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- virtual JumpTarget* true_target() const { return true_target_; }
- virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
- TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot_number,
- TypeInfo info);
- ~TypeInfoCodeGenState();
-
- virtual JumpTarget* true_target() const { return previous()->true_target(); }
- virtual JumpTarget* false_target() const {
- return previous()->false_target();
- }
-
- private:
- Slot* slot_;
- TypeInfo old_type_info_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
// -------------------------------------------------------------------------
// CodeGenerator
@@ -225,45 +71,6 @@
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- TypeInfo type_info(Slot* slot) {
- int index = NumberOfSlot(slot);
- if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
- return (*type_info_)[index];
- }
-
- TypeInfo set_type_info(Slot* slot, TypeInfo info) {
- int index = NumberOfSlot(slot);
- ASSERT(index >= kInvalidSlotNumber);
- if (index != kInvalidSlotNumber) {
- TypeInfo previous_value = (*type_info_)[index];
- (*type_info_)[index] = info;
- return previous_value;
- }
- return TypeInfo::Unknown();
- }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
@@ -275,317 +82,6 @@
}
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- static const int kInvalidSlotNumber = -1;
-
- int NumberOfSlot(Slot* slot);
-
- // State
- bool has_cc() const { return cc_reg_ != al; }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which cannot be done more than once).  The return value
-  // should be in r0.
- void GenerateReturnSequence();
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- void StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand SlotOperand(Slot* slot, Register tmp);
-
- MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver(Register scratch);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Load a named property, returning it in r0. The receiver is passed on the
- // stack, and remains there.
- void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // Store to a named property. If the store is contextual, value is passed on
- // the frame and consumed. Otherwise, receiver and value are passed on the
- // frame and consumed. The result is returned in r0.
- void EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a keyed property, leaving it in r0. The receiver and key are
- // passed on the stack, and remain there.
- void EmitKeyedLoad();
-
- // Store a keyed property. Key and receiver are on the stack and the value is
- // in r0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
- void LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
- void Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict = false);
-
- void SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- // Control flow
- void Branch(bool if_true, JumpTarget* target);
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source code the generated code is for.
-  // Source positions are collected by the assembler and emitted with the
-  // relocation information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
- int loop_nesting_;
-
- Vector<TypeInfo>* type_info_;
-
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class InlineRuntimeFunctionsTable;
- friend class LCodeGen;
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 0ac567c..823c6ff 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,12 +28,9 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
-// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
-// are not running on real ARM hardware. One reason for this is that the
-// old ABI uses fp registers in the calling convention and the simulator does
-// not simulate fp registers or coroutine instructions.
-#if defined(__ARM_EABI__) || !defined(__arm__)
-# define USE_ARM_EABI 1
+// ARM EABI is required.
+#if defined(__arm__) && !defined(__ARM_EABI__)
+#error ARM EABI support is required.
#endif
// This means that interwork-compatible jump instructions are generated. We
@@ -346,7 +343,9 @@
da_x = (0|0|0) << 21, // Decrement after.
ia_x = (0|4|0) << 21, // Increment after.
db_x = (8|0|0) << 21, // Decrement before.
- ib_x = (8|4|0) << 21 // Increment before.
+ ib_x = (8|4|0) << 21, // Increment before.
+
+ kBlockAddrModeMask = (8|4|1) << 21
};
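
The new kBlockAddrModeMask covers the P, U and W bits (bits 24, 23 and 21) of a block data transfer instruction, so a decoder can classify the addressing mode with a single mask-and-compare. A minimal standalone sketch of that idiom; db_w here mirrors the writeback modes used elsewhere in this patch (e.g. ia_w), and the sample instruction word is illustrative, not part of the patch:

#include <cassert>
#include <cstdint>

enum BlockAddrMode : uint32_t {
  da_x = (0|0|0) << 21,                // Decrement after.
  ia_x = (0|4|0) << 21,                // Increment after.
  db_x = (8|0|0) << 21,                // Decrement before.
  ib_x = (8|4|0) << 21,                // Increment before.
  db_w = (8|0|1) << 21,                // db with base writeback.
  kBlockAddrModeMask = (8|4|1) << 21   // Selects the P, U and W bits.
};

int main() {
  const uint32_t instr = 0xE92D4800;   // stmdb sp!, {fp, lr}
  // Masking isolates the addressing-mode bits, so the result can be
  // compared against the enum values directly.
  assert((instr & kBlockAddrModeMask) == db_w);
  return 0;
}
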
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 0f5bf56..51cfeb6 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -42,11 +42,12 @@
namespace internal {
void CPU::Setup() {
- CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
- cpu_features->Probe(true);
- if (!cpu_features->IsSupported(VFP3) || Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(VFP3);
}
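
The hunk above switches CPU::Setup() to the new static CpuFeatures interface and derives Crankshaft support from a plain VFP3 query. A minimal standalone model of the contract this implies, not V8 code: Probe() runs exactly once before any IsSupported() call, and the feature bits are process-wide statics (the probing body below is a stand-in for the real OS/compiler detection):

#include <cassert>

enum CpuFeature { VFP3, ARMv7 };

class CpuFeatures {
 public:
  static void Probe() {
    assert(!initialized_);          // probing twice is a bug
    initialized_ = true;
    supported_ |= 1u << VFP3;       // stand-in for runtime detection
  }
  static bool IsSupported(CpuFeature f) {
    assert(initialized_);           // querying before Probe() is a bug
    return (supported_ & (1u << f)) != 0;
  }
 private:
  static bool initialized_;
  static unsigned supported_;
};

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;

int main() {
  CpuFeatures::Probe();                    // what CPU::Setup() now does
  assert(CpuFeatures::IsSupported(VFP3));  // gates Crankshaft support
  return 0;
}
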
@@ -74,62 +75,33 @@
register uint32_t end asm("a2") =
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
- #ifdef __ARM_EABI__
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
+ #if defined (__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+ asm volatile(
+ "svc 0x0"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- asm volatile(
- "svc %1"
- : "=r" (beg)
- : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
- #else
- // Do not use the value of __ARM_NR_cacheflush in the inline assembly
- // below, because the thumb mode value would be used, which would be
- // wrong, since we switch to ARM mode before executing the svc instruction
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: svc 0x9f0002 \n"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg)
- : "r3");
- #endif
+ // r7 is reserved by the EABI in thumb mode.
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r3, 1f \n\t"
+ "bx r3 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: push {r7} \n\t"
+ "mov r7, %4 \n\t"
+ "svc 0x0 \n\t"
+ "pop {r7} \n\t"
+ "@ Enter THUMB Mode\n\t"
+ "adr r3, 2f+1 \n\t"
+ "bx r3 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
+ : "r3");
#endif
#endif
}
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e6ad98c..07a2272 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
namespace v8 {
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3a3dcf0..f0a6937 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -586,14 +586,16 @@
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(5, r5);
+ __ PrepareCallCFunction(6, r5);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
+ __ mov(r5, Operand(ExternalReference::isolate_address()));
+ __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
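
The deoptimizer entry now passes six C arguments instead of five because the isolate travels explicitly. Under the ARM EABI the first four arguments go in r0-r3 and the rest on the stack, which is why the fp-to-sp delta lands at sp + 0 and the isolate at sp + 4 above. A small sketch of that layout arithmetic, assuming 32-bit pointers (the argument order is read off the register comments in the hunk):

#include <cassert>

// Stack slot (in bytes, relative to sp) of C-call argument `index`
// (0-based) when the first `in_regs` arguments are passed in registers.
int StackSlotOffset(int index, int in_regs, int pointer_size) {
  assert(index >= in_regs);  // earlier arguments live in r0..r3
  return (index - in_regs) * pointer_size;
}

int main() {
  const int kPointerSize = 4;                        // 32-bit ARM
  assert(StackSlotOffset(4, 4, kPointerSize) == 0);  // fp-to-sp delta
  assert(StackSlotOffset(5, 4, kPointerSize) == 4);  // isolate
  return 0;
}
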
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 899b88a..a3775b5 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -371,25 +371,34 @@
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
+ VFPRegPrecision precision =
+ format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
+
+ int retval = 2;
+ int reg = -1;
if (format[1] == 'n') {
- int reg = instr->VnValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
+ reg = instr->VFPNRegValue(precision);
} else if (format[1] == 'm') {
- int reg = instr->VmValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
+ reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- int reg = instr->VdValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
+ reg = instr->VFPDRegValue(precision);
+ if (format[2] == '+') {
+ int immed8 = instr->Immed8Value();
+ if (format[0] == 'S') reg += immed8 - 1;
+ if (format[0] == 'D') reg += (immed8 / 2 - 1);
+ }
+ if (format[2] == '+') retval = 3;
+ } else {
+ UNREACHABLE();
}
- UNREACHABLE();
- return -1;
+ if (precision == kSinglePrecision) {
+ PrintSRegister(reg);
+ } else {
+ PrintDRegister(reg);
+ }
+
+ return retval;
}
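
The rewritten FormatVFPRegister also understands the new 'Sd+'/'Dd+' range suffix used by the vldm/vstm formats below: immed8 counts 32-bit transfer words, so the last register of the list is base + immed8 - 1 for S registers and base + immed8 / 2 - 1 for D registers. The same arithmetic in standalone form:

#include <cassert>

// immed8 counts 32-bit transfer words, as in the decoder change above.
int LastSRegister(int base, int immed8) { return base + immed8 - 1; }
int LastDRegister(int base, int immed8) { return base + immed8 / 2 - 1; }

int main() {
  assert(LastSRegister(2, 4) == 5);  // four words -> {s2-s5}
  assert(LastDRegister(3, 4) == 4);  // four words = two doubles -> {d3-d4}
  return 0;
}
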
@@ -1273,9 +1282,22 @@
Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
}
break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ }
+ break;
+ }
default:
Unknown(instr); // Not used by V8.
- break;
}
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
@@ -1303,9 +1325,19 @@
Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
}
break;
+ case 0x4:
+ case 0x5:
+ case 0x9: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ }
+ break;
+ }
default:
Unknown(instr); // Not used by V8.
- break;
}
} else {
Unknown(instr); // Not used by V8.
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 4aa8d6a..d6846c8 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -136,7 +136,7 @@
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 088ba58..85e4262 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -245,7 +245,7 @@
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
@@ -431,8 +431,7 @@
if (true_label_ != fall_through_) __ b(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- __ b(false_label_);
+ if (false_label_ != fall_through_) __ b(false_label_);
} else {
if (true_label_ != fall_through_) __ b(true_label_);
}
@@ -562,7 +561,7 @@
void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Emit the inlined tests assumed by the stub.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -824,7 +823,7 @@
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
+ clause->body_target()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
@@ -851,7 +850,7 @@
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
+ __ b(clause->body_target());
__ bind(&slow_case);
}
@@ -862,7 +861,7 @@
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
+ __ b(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
@@ -872,14 +871,14 @@
if (default_clause == NULL) {
__ b(nested_statement.break_target());
} else {
- __ b(default_clause->body_target()->entry_label());
+ __ b(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
+ __ bind(clause->body_target());
PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
@@ -1622,27 +1621,26 @@
break;
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(r0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -2352,16 +2350,6 @@
}
}
} else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(isolate()->heap()->empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_full_codegen(true);
- }
-
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(fun);
}
@@ -2543,11 +2531,75 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
- // used in a few functions in runtime.js which should not normally be hit by
- // this compiler.
+ if (FLAG_debug_code) __ AbortIfSmi(r0);
+
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ b(ne, if_true);
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r2, ip);
+ __ b(eq, if_false);
+
+ // Look for the valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ ldr(r4, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+ __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ // r4: descriptor array
+ // r3: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Calculate location of the first key name.
+ __ add(r4,
+ r4,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf, the result is false.
+ Label entry, loop;
+ // The use of ip to store the valueOf symbol assumes that it is not otherwise
+ // used in the loop below.
+ __ mov(ip, Operand(FACTORY->value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(r3, MemOperand(r4, 0));
+ __ cmp(r3, ip);
+ __ b(eq, if_false);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&entry);
+ __ cmp(r4, Operand(r2));
+ __ b(ne, &loop);
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(r2, r3);
+ __ b(ne, if_false);
+
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set the result to true.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ jmp(if_true);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ jmp(if_false);
context()->Plug(if_true, if_false);
}
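
Since the assembly above is long, here is a compact C++ sketch of the check it implements; MapModel is a stand-in for V8's Map and the field names are illustrative:

#include <cassert>
#include <string>
#include <vector>

struct MapModel {
  bool safe_for_default_value_of = false;    // cached bit in bit_field2
  bool has_slow_properties = false;          // properties in a hash table
  std::vector<std::string> descriptor_keys;  // keys in the descriptor array
  bool prototype_is_pristine = true;         // unmodified String prototype
};

bool IsStringWrapperSafeForDefaultValueOf(MapModel* map) {
  if (map->safe_for_default_value_of) return true;  // fast path: cached
  if (map->has_slow_properties) return false;       // slow-case object
  for (const std::string& key : map->descriptor_keys) {
    // Transitions are not filtered out, so a "valueOf" transition gives a
    // (safe) false negative, exactly as in the assembly.
    if (key == "valueOf") return false;
  }
  if (!map->prototype_is_pristine) return false;
  map->safe_for_default_value_of = true;            // memoize on the map
  return true;
}

int main() {
  MapModel clean;
  assert(IsStringWrapperSafeForDefaultValueOf(&clean));
  assert(clean.safe_for_default_value_of);  // second call hits the cache
  MapModel shadowed;
  shadowed.descriptor_keys.push_back("valueOf");
  assert(!IsStringWrapperSafeForDefaultValueOf(&shadowed));
  return 0;
}
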
@@ -2802,9 +2854,10 @@
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (isolate()->cpu_features()->IsSupported(VFP3)) {
- __ PrepareCallCFunction(0, r1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ PrepareCallCFunction(1, r0);
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP3);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
@@ -2822,10 +2875,11 @@
__ vstr(d7, r0, HeapNumber::kValueOffset);
__ mov(r0, r4);
} else {
+ __ PrepareCallCFunction(2, r0);
__ mov(r0, Operand(r4));
- __ PrepareCallCFunction(1, r1);
+ __ mov(r1, Operand(ExternalReference::isolate_address()));
__ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
+ ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
context()->Plug(r0);
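
The exponent trick in the comment above deserves a worked version: splicing 32 random bits into the low mantissa of 1.0 x 2^20 and subtracting 1.0 x 2^20 leaves exactly 0.(32 random bits), with no integer-to-double conversion. Standalone C++, not V8 code:

#include <cassert>
#include <cstdint>
#include <cstring>

double RandomBitsToDouble(uint32_t random) {
  // 0x41300000 is the top half of 1.0 x 2^20; the random bits become the
  // low 32 bits of the 52-bit mantissa.
  uint64_t bits = (uint64_t{0x41300000} << 32) | random;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // subtract 1.0 x 2^20; result is in [0, 1)
}

int main() {
  assert(RandomBitsToDouble(0) == 0.0);
  assert(RandomBitsToDouble(0x80000000u) == 0.5);
  assert(RandomBitsToDouble(0xFFFFFFFFu) < 1.0);
  return 0;
}
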
@@ -3107,15 +3161,14 @@
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
}
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
+ VisitForAccumulatorValue(args->last()); // Function.
- // InvokeFunction requires function in r1. Move it in there.
- if (!result_register().is(r1)) __ mov(r1, result_register());
+ // InvokeFunction requires the function in r1. Move it in there.
+ __ mov(r1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3827,7 +3880,11 @@
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
Label no_conversion;
@@ -4237,7 +4294,6 @@
default:
break;
}
-
__ Call(ic, mode);
}
@@ -4259,7 +4315,6 @@
default:
break;
}
-
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index dc4f761..db04f33 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,7 @@
#include "assembler-arm.h"
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
@@ -926,217 +926,6 @@
__ TailCallExternalReference(ref, 2, 1);
}
-// Returns the code marker, or the 0 if the code is not marked.
-static inline int InlinedICSiteMarker(Address address,
- Address* inline_end_address) {
- if (V8::UseCrankshaft()) return false;
-
- // If the instruction after the call site is not the pseudo instruction nop1
- // then this is not related to an inlined in-object property load. The nop1
- // instruction is located just after the call to the IC in the deferred code
- // handling the miss in the inlined code. After the nop1 instruction there is
- // a branch instruction for jumping back from the deferred code.
- Address address_after_call = address + Assembler::kCallTargetAddressOffset;
- Instr instr_after_call = Assembler::instr_at(address_after_call);
- int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
-
- // A negative result means the code is not marked.
- if (code_marker <= 0) return 0;
-
- Address address_after_nop = address_after_call + Assembler::kInstrSize;
- Instr instr_after_nop = Assembler::instr_at(address_after_nop);
- // There may be some reg-reg move and frame merging code to skip over before
- // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
- // code.
- while (!Assembler::IsBranch(instr_after_nop)) {
- address_after_nop += Assembler::kInstrSize;
- instr_after_nop = Assembler::instr_at(address_after_nop);
- }
-
- // Find the end of the inlined code for handling the load.
- int b_offset =
- Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
- ASSERT(b_offset < 0); // Jumping back from deferred code.
- *inline_end_address = address_after_nop + b_offset;
-
- return code_marker;
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for handling the load if this is an
- // inlined IC call site.
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
- // The immediate must be representable in 12 bits.
- ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
- Address ldr_property_instr_address =
- inline_end_address - Assembler::kInstrSize;
- ASSERT(Assembler::IsLdrRegisterImmediate(
- Assembler::instr_at(ldr_property_instr_address)));
- Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
- ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
- ldr_property_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
-
- // Indicate that code has changed.
- CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
-
- // Patch the map check.
- // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
- // 4 instructions before the end of the inlined code.
- // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
- int ldr_map_offset = -4;
- Address ldr_map_instr_address =
- inline_end_address + ldr_map_offset * Assembler::kInstrSize;
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- // Find the end of the inlined code for handling the contextual load if
- // this is inlined IC call site.
- Address inline_end_address = 0;
- int marker = InlinedICSiteMarker(address, &inline_end_address);
- if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
- (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
- return false;
- }
- // On ARM we don't rely on the is_dont_delete argument as the hint is already
- // embedded in the code marker.
- bool marker_is_dont_delete =
- marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
-
- // These are the offsets from the end of the inlined code.
- // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
- int ldr_map_offset = marker_is_dont_delete ? -5: -8;
- int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
- if (FLAG_debug_code && marker_is_dont_delete) {
- // Three extra instructions were generated to check for the_hole_value.
- ldr_map_offset -= 3;
- ldr_cell_offset -= 3;
- }
- Address ldr_map_instr_address =
- inline_end_address + ldr_map_offset * Assembler::kInstrSize;
- Address ldr_cell_instr_address =
- inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
-
- // Patch the map check.
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- // Patch the cell address.
- Assembler::set_target_address_at(ldr_cell_instr_address,
- reinterpret_cast<Address>(cell));
-
- return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for the store if there is an
- // inlined version of the store.
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Compute the address of the map load instruction.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
- Assembler::kInstrSize);
-
- // Update the offsets if initializing the inlined store. No reason
- // to update the offsets when clearing the inlined version because
- // it will bail out in the map check.
- if (map != HEAP->null_value()) {
- // Patch the offset in the actual store instruction.
- Address str_property_instr_address =
- ldr_map_instr_address + 3 * Assembler::kInstrSize;
- Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
- ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
- str_property_instr = Assembler::SetStrRegisterImmediateOffset(
- str_property_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(str_property_instr_address, str_property_instr);
-
- // Patch the offset in the add instruction that is part of the
- // write barrier.
- Address add_offset_instr_address =
- str_property_instr_address + Assembler::kInstrSize;
- Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
- ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
- add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
- add_offset_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
-
- // Indicate that code has changed.
- CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
- }
-
- // Patch the map check.
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
-
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the map check.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
- Assembler::kInstrSize);
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for handling the store if this is an
- // inlined IC call site.
- Address inline_end_address = 0;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the map check.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
- Assembler::kInstrSize);
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
-}
-
Object* KeyedLoadIC_Miss(Arguments args);
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
deleted file mode 100644
index df370c4..0000000
--- a/src/arm/jump-target-arm.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (entry_frame_set_) {
- if (entry_label_.is_bound()) {
- // If we already bound and generated code at the destination then it
- // is too late to ask for less optimistic type assumptions.
- ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
- }
- // There already a frame expectation at the target.
- cgen()->frame()->MergeTo(&entry_frame_);
- cgen()->DeleteFrame();
- } else {
- // Clone the current frame to use as the expected one at the target.
- set_entry_frame(cgen()->frame());
- // Zap the fall-through frame since the jump was unconditional.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
- if (entry_label_.is_bound()) {
- // You can't jump backwards to an already bound label unless you admitted
- // up front that this was a bidirectional jump target. Bidirectional jump
- // targets will zap their type info when bound in case some later virtual
- // frame with less precise type info branches to them.
- ASSERT(direction_ != FORWARD_ONLY);
- }
- __ jmp(&entry_label_);
-}
-
-
-void JumpTarget::DoBranch(Condition cond, Hint ignored) {
- ASSERT(cgen()->has_valid_frame());
-
- if (entry_frame_set_) {
- if (entry_label_.is_bound()) {
- // If we already bound and generated code at the destination then it
- // is too late to ask for less optimistic type assumptions.
- ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
- }
- // We have an expected frame to merge to on the backward edge.
- cgen()->frame()->MergeTo(&entry_frame_, cond);
- } else {
- // Clone the current frame to use as the expected one at the target.
- set_entry_frame(cgen()->frame());
- }
- if (entry_label_.is_bound()) {
- // You can't branch backwards to an already bound label unless you admitted
- // up front that this was a bidirectional jump target. Bidirectional jump
- // targets will zap their type info when bound in case some later virtual
- // frame with less precise type info branches to them.
- ASSERT(direction_ != FORWARD_ONLY);
- }
- __ b(cond, &entry_label_);
- if (cond == al) {
- cgen()->DeleteFrame();
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- // Calls are always 'forward' so we use a copy of the current frame (plus
- // one for a return address) as the expected frame.
- ASSERT(!entry_frame_set_);
- VirtualFrame target_frame = *cgen()->frame();
- target_frame.Adjust(1);
- set_entry_frame(&target_frame);
-
- __ bl(&entry_label_);
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- if (cgen()->has_valid_frame()) {
- if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
- // If there is a current frame we can use it on the fall through.
- if (!entry_frame_set_) {
- entry_frame_ = *cgen()->frame();
- entry_frame_set_ = true;
- } else {
- cgen()->frame()->MergeTo(&entry_frame_);
- // On fall through we may have to merge both ways.
- if (direction_ != FORWARD_ONLY) {
- // This will not need to adjust the virtual frame entries that are
- // register allocated since that was done above and they now match.
- // But it does need to adjust the entry_frame_ of this jump target
- // to make it potentially less optimistic. Later code can branch back
- // to this jump target and we need to assert that that code does not
- // have weaker assumptions about types.
- entry_frame_.MergeTo(cgen()->frame());
- }
- }
- } else {
- // If there is no current frame we must have an entry frame which we can
- // copy.
- ASSERT(entry_frame_set_);
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
- }
-
- __ bind(&entry_label_);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5d31473..faf6404 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -61,22 +61,21 @@
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Input operands must use a fixed register, a use-at-start policy or
+ // a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ LUnallocated* operand = LUnallocated::cast(it.Next());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
}
for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ LUnallocated* operand = LUnallocated::cast(it.Next());
+ ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
@@ -301,6 +300,13 @@
}
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[r2] #%d / ", arity());
}
@@ -1114,9 +1120,9 @@
return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
+ if (HConstant::cast(v)->ToBoolean()) {
return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
+ } else {
return new LGoto(instr->SecondSuccessor()->block_id());
}
}
@@ -1212,6 +1218,14 @@
}
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(function);
+ return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1329,7 +1343,7 @@
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
// TODO(1042) The fixed register allocation
- // is needed because we call GenericBinaryOpStub from
+ // is needed because we call TypeRecordingBinaryOpStub from
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
@@ -1723,26 +1737,42 @@
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal();
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), r0);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
if (instr->check_hole_value()) {
LOperand* temp = TempRegister();
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(new LStoreGlobal(value, temp));
+ return AssignEnvironment(new LStoreGlobalCell(value, temp));
} else {
LOperand* value = UseRegisterAtStart(instr->value());
- return new LStoreGlobal(value, NULL);
+ return new LStoreGlobalCell(value, NULL);
}
}
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), r1);
+ LOperand* value = UseFixed(instr->value(), r0);
+ LStoreGlobalGeneric* result =
+ new LStoreGlobalGeneric(global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
@@ -1824,21 +1854,20 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
- // TODO(danno): Add support for other external array types.
- if (instr->array_type() != kExternalPixelArray) {
- Abort("unsupported load for external array type.");
- return NULL;
- }
-
- ASSERT(instr->representation().IsInteger32());
+ ExternalArrayType array_type = instr->array_type();
+ Representation representation(instr->representation());
+ ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
+ (representation.IsDouble() && array_type == kExternalFloatArray));
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer =
- UseRegisterAtStart(instr->external_pointer());
- LOperand* key = UseRegisterAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegister(instr->key());
LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer,
- key);
- return DefineAsRegister(result);
+ new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
+ // An unsigned int array load might overflow and cause a deopt, so make
+ // sure it has an environment.
+ return (array_type == kExternalUnsignedIntArray) ?
+ AssignEnvironment(load_instr) : load_instr;
}
@@ -1873,23 +1902,24 @@
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- // TODO(danno): Add support for other external array types.
- if (instr->array_type() != kExternalPixelArray) {
- Abort("unsupported store for external array type.");
- return NULL;
- }
-
- ASSERT(instr->value()->representation().IsInteger32());
+ Representation representation(instr->value()->representation());
+ ExternalArrayType array_type = instr->array_type();
+ ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
+ (representation.IsDouble() && array_type == kExternalFloatArray));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* value = UseTempRegister(instr->value()); // changed by clamp.
+ bool val_is_temp_register = array_type == kExternalPixelArray ||
+ array_type == kExternalFloatArray;
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
LOperand* key = UseRegister(instr->key());
return new LStoreKeyedSpecializedArrayElement(external_pointer,
key,
- value);
+ val);
}
@@ -1930,6 +1960,13 @@
}
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
@@ -2061,8 +2098,6 @@
}
}
- ASSERT(env->length() == instr->environment_length());
-
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 77aabaf..4add6bf 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -106,6 +106,7 @@
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(Integer32ToDouble) \
+ V(InvokeFunction) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
@@ -119,7 +120,8 @@
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
- V(LoadGlobal) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
@@ -144,12 +146,14 @@
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringLength) \
@@ -1259,22 +1263,55 @@
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- LStoreGlobal(LOperand* value, LOperand* temp) {
+ explicit LLoadGlobalGeneric(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* global_object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LStoreGlobalGeneric(LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = global_object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* global_object() { return InputAt(0); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(1); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1377,6 +1414,23 @@
};
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInvokeFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* function() { return inputs_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
@@ -1605,6 +1659,7 @@
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1644,6 +1699,7 @@
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1669,6 +1725,22 @@
};
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+};
+
+
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 75406cf..2d415cb 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -91,7 +91,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
+ code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -149,7 +149,7 @@
__ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
+ int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ mov(r0, Operand(slots));
@@ -263,7 +263,7 @@
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- safepoints_.Emit(masm(), StackSlotCount());
+ safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -459,7 +459,7 @@
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
+ int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -484,11 +484,19 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, safepoint_mode);
}
@@ -501,11 +509,21 @@
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode) {
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
@@ -517,8 +535,16 @@
}
RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(),
+ 0,
+ deoptimization_environment->deoptimization_index());
+ }
}
@@ -650,6 +676,8 @@
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
@@ -1015,7 +1043,7 @@
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
- __ PushSafepointRegistersAndDoubles();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
// Move left to r1 and right to r0 for the stub call.
if (left.is(r1)) {
__ Move(r0, right);
@@ -1037,7 +1065,6 @@
Safepoint::kNoDeoptimizationIndex);
// Overwrite the stored value of r0 with the result of the stub.
__ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
- __ PopSafepointRegistersAndDoubles();
}
@@ -1460,11 +1487,8 @@
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ PopSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
@@ -2065,7 +2089,7 @@
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(flags);
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// Get the temp register reserved by the instruction. This needs to be r4 as
// its slot of the pushing of safepoint registers is used to communicate the
@@ -2080,12 +2104,13 @@
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
-
- __ PopSafepointRegisters();
}
@@ -2155,7 +2180,7 @@
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand(sp_delta));
@@ -2163,7 +2188,7 @@
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
@@ -2175,7 +2200,19 @@
}
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(r0));
+ ASSERT(ToRegister(instr->result()).is(r0));
+
+ __ mov(r2, Operand(instr->name()));
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
@@ -2200,6 +2237,18 @@
}
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(r1));
+ ASSERT(ToRegister(instr->value()).is(r0));
+
+ __ mov(r2, Operand(instr->name()));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2361,12 +2410,14 @@
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
- __ LoadRoot(ip, Heap::kExternalPixelArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
- __ Check(eq, "Check for fast elements failed.");
+ __ b(eq, &done);
+ __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
+ __ cmp(scratch, Operand(kExternalArrayTypeCount));
+ __ Check(cc, "Check for fast elements failed.");
__ bind(&done);
}
}
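
The rewritten debug check recognizes external array backing stores by instance type range instead of comparing against each root map: subtract FIRST_EXTERNAL_ARRAY_TYPE, then do one unsigned compare against kExternalArrayTypeCount. The idiom in standalone form, with illustrative constants:

#include <cassert>
#include <cstdint>

// One unsigned compare covers first <= type < first + count, because a
// type below `first` wraps around to a huge unsigned value.
bool InTypeRange(uint32_t type, uint32_t first, uint32_t count) {
  return (type - first) < count;
}

int main() {
  const uint32_t kFirst = 100, kCount = 8;           // illustrative values
  assert(InTypeRange(kFirst, kFirst, kCount));
  assert(InTypeRange(kFirst + 7, kFirst, kCount));
  assert(!InTypeRange(kFirst - 1, kFirst, kCount));  // wraps, compares huge
  assert(!InTypeRange(kFirst + 8, kFirst, kCount));
  return 0;
}
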
@@ -2419,14 +2470,47 @@
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
- ASSERT(instr->array_type() == kExternalPixelArray);
-
Register external_pointer = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ ldrb(result, MemOperand(external_pointer, key));
+ ExternalArrayType array_type = instr->array_type();
+ if (array_type == kExternalFloatArray) {
+ CpuFeatures::Scope scope(VFP3);
+ DwVfpRegister result(ToDoubleRegister(instr->result()));
+ __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
+ __ vldr(result.low(), scratch0(), 0);
+ __ vcvt_f64_f32(result, result.low());
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (array_type) {
+ case kExternalByteArray:
+ __ ldrsb(result, MemOperand(external_pointer, key));
+ break;
+ case kExternalUnsignedByteArray:
+ case kExternalPixelArray:
+ __ ldrb(result, MemOperand(external_pointer, key));
+ break;
+ case kExternalShortArray:
+ __ ldrsh(result, MemOperand(external_pointer, key, LSL, 1));
+ break;
+ case kExternalUnsignedShortArray:
+ __ ldrh(result, MemOperand(external_pointer, key, LSL, 1));
+ break;
+ case kExternalIntArray:
+ __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+ break;
+ case kExternalUnsignedIntArray:
+ __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+ __ cmp(result, Operand(0x80000000));
+ // TODO(danno): we could be more clever here, perhaps having a special
+ // version of the stub that detects if the overflow case actually
+ // happens, and generates code that returns a double rather than an int.
+ DeoptimizeIf(cs, instr->environment());
+ break;
+ case kExternalFloatArray:
+ UNREACHABLE();
+ break;
+ }
+ }
}
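Each external array type fixes the element width (hence the LSL shift count in the MemOperands above) and, for loads, whether the value is sign-extended. A host-side sketch of the width mapping; the enum mirrors the names used here but is illustrative, not V8's actual declaration:

#include <cstddef>

enum ExternalArrayType {            // Illustrative stand-in only.
  kExternalByteArray, kExternalUnsignedByteArray, kExternalPixelArray,
  kExternalShortArray, kExternalUnsignedShortArray,
  kExternalIntArray, kExternalUnsignedIntArray, kExternalFloatArray
};

// Element width in bytes; log2(width) is the LSL amount in the MemOperands.
size_t ElementSize(ExternalArrayType type) {
  switch (type) {
    case kExternalByteArray:
    case kExternalUnsignedByteArray:
    case kExternalPixelArray:         return 1;  // ldrsb / ldrb / strb
    case kExternalShortArray:
    case kExternalUnsignedShortArray: return 2;  // ldrsh / ldrh / strh
    default:                          return 4;  // ldr / str / vldr / vstr
  }
}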
@@ -2617,7 +2701,7 @@
__ Call(ip);
// Set up deoptimization.
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
// Restore context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2655,44 +2739,43 @@
// Input is negative. Reverse its sign.
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r1) ? r0 : r1;
- Register tmp2 = input.is(r2) ? r0 : r2;
- Register tmp3 = input.is(r3) ? r0 : r3;
- Register tmp4 = input.is(r4) ? r0 : r4;
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(r1) ? r0 : r1;
+ Register tmp2 = input.is(r2) ? r0 : r2;
+ Register tmp3 = input.is(r3) ? r0 : r3;
+ Register tmp4 = input.is(r4) ? r0 : r4;
- // exponent: floating point exponent value.
+ // exponent: floating point exponent value.
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ b(&allocated);
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Set the pointer to the new heap number in tmp.
- if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
- __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+ __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
+ __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ StoreToSafepointRegisterSlot(tmp1, input);
- __ PopSafepointRegisters();
+ __ StoreToSafepointRegisterSlot(tmp1, input);
+ }
__ bind(&done);
}
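The deferred abs path never touches the VFP: it copies the mantissa word unchanged and stores the exponent word with HeapNumber::kSignMask cleared. The same bit-level trick in a self-contained C++ sketch, where bit 63 stands in for kSignMask:

#include <cstdint>
#include <cstring>

// Clear the IEEE-754 sign bit of a double via its bit pattern; this is the
// same effect as the bic on the exponent word in the generated code above.
double BitwiseFabs(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= ~(1ull << 63);  // The sign bit lives in the high word.
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}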
@@ -2778,9 +2861,49 @@
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = result;
- __ EmitVFPTruncate(kRoundToNearest,
+ Register scratch1 = result;
+ Register scratch2 = scratch0();
+ Label done, check_sign_on_zero;
+
+ // Extract exponent bits.
+ __ vmov(scratch1, input.high());
+ __ ubfx(scratch2,
+ scratch1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+ __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2));
+ __ mov(result, Operand(0), LeaveCC, le);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ b(le, &check_sign_on_zero);
+ } else {
+ __ b(le, &done);
+ }
+
+ // The following conversion will not work with numbers
+ // outside of ]-2^32, 2^32[.
+ __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32));
+ DeoptimizeIf(ge, instr->environment());
+
+ // Save the original sign for later comparison.
+ __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
+
+ __ vmov(double_scratch0(), 0.5);
+ __ vadd(input, input, double_scratch0());
+
+ // Check sign of the result: if the sign changed, the input
+ // value was in ]-0.5, 0[ and the result should be -0.
+ __ vmov(scratch1, input.high());
+ __ eor(scratch1, scratch1, Operand(scratch2), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(mi, instr->environment());
+ } else {
+ __ mov(result, Operand(0), LeaveCC, mi);
+ __ b(mi, &done);
+ }
+
+ __ EmitVFPTruncate(kRoundToMinusInf,
double_scratch0().low(),
input,
scratch1,
@@ -2790,14 +2913,14 @@
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
- Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
+ __ bind(&check_sign_on_zero);
__ vmov(scratch1, input.high());
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
}
+ __ bind(&done);
}
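The new DoMathRound sequence implements round-half-up: tiny magnitudes short-circuit to +/-0 through the exponent field, 0.5 is added, a sign flip catches negative inputs that rounded up to zero, and the sum is truncated toward minus infinity. A behavioural C++ sketch of the same flow (deoptimizations become 'return false'; the int32 overflow check done inside EmitVFPTruncate is elided):

#include <cmath>
#include <cstdint>
#include <cstring>

// Round half-up, mirroring the generated code; returns false where the
// generated code would deoptimize.
bool MathRoundSketch(double input, int32_t* result,
                     bool bailout_on_minus_zero) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  bool negative = (bits >> 63) != 0;
  if (exponent < -1) {               // |input| < 0.5: result is +/-0.
    *result = 0;
    return !(bailout_on_minus_zero && negative);  // check_sign_on_zero path.
  }
  if (exponent >= 32) return false;  // Outside ]-2^32, 2^32[.
  double sum = input + 0.5;
  if (std::signbit(input) && !std::signbit(sum)) {  // Sign flipped.
    *result = 0;
    return !bailout_on_minus_zero;
  }
  *result = static_cast<int32_t>(std::floor(sum));  // kRoundToMinusInf.
  return true;
}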
@@ -2942,6 +3065,21 @@
}
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(r1, count, CALL_FUNCTION, &generator);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
@@ -3049,7 +3187,7 @@
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = info_->is_strict()
+ Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3090,15 +3228,41 @@
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
- ASSERT(instr->array_type() == kExternalPixelArray);
Register external_pointer = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
- Register value = ToRegister(instr->value());
-
- // Clamp the value to [0..255].
- __ Usat(value, 8, Operand(value));
- __ strb(value, MemOperand(external_pointer, key, LSL, 0));
+ ExternalArrayType array_type = instr->array_type();
+ if (array_type == kExternalFloatArray) {
+ CpuFeatures::Scope scope(VFP3);
+ DwVfpRegister value(ToDoubleRegister(instr->value()));
+ __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
+ __ vcvt_f32_f64(double_scratch0().low(), value);
+ __ vstr(double_scratch0().low(), scratch0(), 0);
+ } else {
+ Register value(ToRegister(instr->value()));
+ switch (array_type) {
+ case kExternalPixelArray:
+ // Clamp the value to [0..255].
+ __ Usat(value, 8, Operand(value));
+ __ strb(value, MemOperand(external_pointer, key));
+ break;
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(value, MemOperand(external_pointer, key));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(value, MemOperand(external_pointer, key, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(value, MemOperand(external_pointer, key, LSL, 2));
+ break;
+ case kExternalFloatArray:
+ UNREACHABLE();
+ break;
+ }
+ }
}
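For pixel arrays the store still clamps with Usat, ARM's unsigned saturate instruction. Its scalar equivalent, as a short C++ sketch:

#include <cstdint>

// Equivalent of "usat value, #8, value": clamp a signed word to [0, 255].
uint8_t SaturateToByte(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}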
@@ -3107,13 +3271,21 @@
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = info_->is_strict()
+ Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
@@ -3230,7 +3402,7 @@
// contained in the register pointer map.
__ mov(result, Operand(0));
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
@@ -3243,15 +3415,12 @@
__ SmiTag(index);
__ push(index);
}
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
if (FLAG_debug_code) {
__ AbortIfNotSmi(r0);
}
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
- __ PopSafepointRegisters();
}
@@ -3294,14 +3463,11 @@
// contained in the register pointer map.
__ mov(result, Operand(0));
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
__ StoreToSafepointRegisterSlot(r0, result);
- __ PopSafepointRegisters();
}
@@ -3357,7 +3523,7 @@
SwVfpRegister flt_scratch = s0;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -3382,9 +3548,7 @@
// integer value.
__ mov(ip, Operand(0));
__ StoreToSafepointRegisterSlot(ip, reg);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
if (!reg.is(r0)) __ mov(reg, r0);
// Done. Put the value in dbl_scratch into the value of the allocated heap
@@ -3393,7 +3557,6 @@
__ sub(ip, reg, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
__ StoreToSafepointRegisterSlot(reg, reg);
- __ PopSafepointRegisters();
}
@@ -3433,12 +3596,9 @@
Register reg = ToRegister(instr->result());
__ mov(reg, Operand(0));
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ StoreToSafepointRegisterSlot(r0, reg);
- __ PopSafepointRegisters();
}
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index caa85d2..1110ea6 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -57,7 +57,8 @@
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
- resolver_(this) {
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -137,7 +138,7 @@
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
- return info()->is_strict() ? kStrictMode : kNonStrictMode;
+ return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -157,8 +158,8 @@
Register temporary,
Register temporary2);
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@@ -172,12 +173,24 @@
bool GenerateDeferredCode();
bool GenerateSafepointTable();
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
+
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
@@ -185,6 +198,10 @@
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
@@ -193,7 +210,9 @@
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr);
+ void RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
@@ -292,6 +311,48 @@
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PushSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PushSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PopSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
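PushSafepointRegistersScope turns the push/pop pairing that used to be written out by hand in the .cc file into an RAII guard: the constructor pushes the register set and records the expected kind, the destructor pops it and restores Safepoint::kSimple, so every exit from a deferred-code block stays balanced. A self-contained miniature of that balancing behaviour (FakeCodegen and ScopeSketch are stand-ins, not V8 types):

#include <cassert>

struct FakeCodegen {
  int kind = 0;  // 0 plays the role of Safepoint::kSimple.
};

struct ScopeSketch {
  ScopeSketch(FakeCodegen* cg, int kind) : cg_(cg) {
    assert(cg_->kind == 0);  // Only one active register scope at a time.
    cg_->kind = kind;        // PushSafepointRegisters() would go here.
  }
  ~ScopeSketch() {
    cg_->kind = 0;           // PopSafepointRegisters() would go here.
  }
  FakeCodegen* cg_;
};

int main() {
  FakeCodegen cg;
  {
    ScopeSketch scope(&cg, 1);  // Safepoint::kWithRegisters stand-in.
    // ... deferred code runs with registers saved ...
  }
  assert(cg.kind == 0);  // Balanced again on scope exit.
}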
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 3a1a8b6..6a095d3 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -32,18 +32,21 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(HEAP->undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
@@ -292,7 +295,7 @@
} else if (!src2.is_single_instruction() &&
!src2.must_use_constant_pool() &&
- Isolate::Current()->cpu_features()->IsSupported(ARMv7) &&
+ CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
@@ -305,7 +308,7 @@
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
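When ARMv7's ubfx is unavailable, the fallback masks and then shifts. The same extraction in C++, matching the mask construction above:

#include <cstdint>

// Software ubfx: extract 'width' bits of 'src' starting at bit 'lsb'.
// Assumes lsb + width < 32, as the shift in the mask construction requires.
uint32_t SoftwareUbfx(uint32_t src, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  return (src & mask) >> lsb;
}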
@@ -320,7 +323,7 @@
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@@ -348,7 +351,7 @@
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
- if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@@ -362,7 +365,7 @@
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
- if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
@@ -373,7 +376,7 @@
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7)) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@@ -619,7 +622,7 @@
ASSERT_EQ(dst1.code() + 1, dst2.code());
// Generate two ldr instructions if ldrd is not available.
- if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@@ -644,7 +647,7 @@
ASSERT_EQ(src1.code() + 1, src2.code());
// Generate two str instructions if strd is not available.
- if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@@ -746,12 +749,10 @@
// Optionally save all double registers.
if (save_doubles) {
- sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
- }
+ DwVfpRegister first = d0;
+ DwVfpRegister last =
+ DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
+ vstm(db_w, sp, first, last);
// Note that d0 will be accessible at
// fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
@@ -808,11 +809,13 @@
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- const int offset = -2 * kPointerSize;
- vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
- }
+ // Calculate the stack location of the saved doubles and restore them.
+ const int offset = 2 * kPointerSize;
+ sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
+ DwVfpRegister first = d0;
+ DwVfpRegister last =
+ DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
+ vldm(ia, r3, first, last);
}
// Clear top frame.
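The restore path recomputes where vstm left the doubles: the block sits below fp, past the sp and code slots (2 * kPointerSize), and spans kNumRegisters * kDoubleSize bytes. A quick arithmetic check, assuming the usual values for this port (4-byte pointers, 8-byte doubles, 16 d-registers), which are assumptions of this sketch:

#include <cassert>

int main() {
  const int kPointerSize = 4, kDoubleSize = 8, kNumRegisters = 16;
  // Address handed to vldm: fp minus (two slots + the saved double block).
  int r3_offset_below_fp = 2 * kPointerSize + kNumRegisters * kDoubleSize;
  assert(r3_offset_below_fp == 136);  // d0 lives at fp - 136 here.
}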
@@ -836,11 +839,7 @@
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-#if !defined(USE_ARM_EABI)
- UNREACHABLE();
-#else
vmov(dst, r0, r1);
-#endif
}
@@ -1799,9 +1798,10 @@
bind(&delete_allocated_handles);
str(r5, MemOperand(r7, kLimitOffset));
mov(r4, r0);
- PrepareCallCFunction(0, r5);
+ PrepareCallCFunction(1, r5);
+ mov(r0, Operand(ExternalReference::isolate_address()));
CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 0);
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
jmp(&leave_exit_frame);
@@ -1902,7 +1902,7 @@
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
- if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
@@ -1998,7 +1998,7 @@
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
- ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
+ ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
@@ -2156,7 +2156,7 @@
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -2797,9 +2797,6 @@
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = ActivationFrameAlignment();
- // Reserve space for Isolate address which is always passed as last parameter
- num_arguments += 1;
-
// Up to four simple arguments are passed in registers r0..r3.
int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
0 : num_arguments - kRegisterPassedArguments;
@@ -2836,19 +2833,6 @@
ExternalReference function_reference,
Register scratch,
int num_arguments) {
- // Push Isolate address as the last argument.
- if (num_arguments < kRegisterPassedArguments) {
- Register arg_to_reg[] = {r0, r1, r2, r3};
- Register r = arg_to_reg[num_arguments];
- mov(r, Operand(ExternalReference::isolate_address()));
- } else {
- int stack_passed_arguments = num_arguments - kRegisterPassedArguments;
- // Push Isolate address on the stack after the arguments.
- mov(scratch, Operand(ExternalReference::isolate_address()));
- str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- }
- num_arguments += 1;
-
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -2911,7 +2895,7 @@
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 2b81c08..ab5efb0 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -90,7 +90,11 @@
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke any such function on
+ // the macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
@@ -781,7 +785,10 @@
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
- Handle<Object> CodeObject() { return code_object_; }
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
// ---------------------------------------------------------------------------
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 8d540d4..4bd8c80 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -116,7 +116,7 @@
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -347,7 +347,7 @@
__ sub(current_input_offset(), r2, end_of_input_address());
} else {
ASSERT(mode_ == UC16);
- int argument_count = 3;
+ int argument_count = 4;
__ PrepareCallCFunction(argument_count, r2);
// r0 - offset of start of capture
@@ -358,6 +358,7 @@
// r0: Address byte_offset1 - Address captured substring's start.
// r1: Address byte_offset2 - Address of current character position.
// r2: size_t byte_length - length of capture in bytes(!)
+ // r3: Isolate* isolate
// Address of start of capture.
__ add(r0, r0, Operand(end_of_input_address()));
@@ -367,6 +368,8 @@
__ mov(r4, Operand(r1));
// Address of current input position.
__ add(r1, current_input_offset(), Operand(end_of_input_address()));
+ // Isolate.
+ __ mov(r3, Operand(ExternalReference::isolate_address()));
ExternalReference function =
ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
@@ -778,10 +781,11 @@
Label grow_failed;
// Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 2;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
__ CallCFunction(grow_stack, num_arguments);
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
deleted file mode 100644
index 945cdeb..0000000
--- a/src/arm/register-allocator-arm-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
-}
-
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers. The mapping is:
-//
-// r0 <-> 0
-// r1 <-> 1
-// r2 <-> 2
-// r3 <-> 3
-// r4 <-> 4
-// r5 <-> 5
-// r6 <-> 6
-// r7 <-> 7
-// r9 <-> 8
-// r10 <-> 9
-// ip <-> 10
-// lr <-> 11
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // r0
- 1, // r1
- 2, // r2
- 3, // r3
- 4, // r4
- 5, // r5
- 6, // r6
- 7, // r7
- -1, // cp
- 8, // r9
- 9, // r10
- -1, // fp
- 10, // ip
- -1, // sp
- 11, // lr
- -1 // pc
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] =
- { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
deleted file mode 100644
index 3b35574..0000000
--- a/src/arm/register-allocator-arm.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- UNIMPLEMENTED();
-}
-
-
-void Result::ToRegister(Register target) {
- UNIMPLEMENTED();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on ARM.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/register-allocator-arm.h b/src/arm/register-allocator-arm.h
deleted file mode 100644
index fdbc88f..0000000
--- a/src/arm/register-allocator-arm.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- // No registers are currently managed by the register allocator on ARM.
- static const int kNumRegisters = 0;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 46797d9..da554c2 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -67,6 +67,7 @@
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
+ double GetRegisterPairDoubleValue(int regnum);
double GetVFPDoubleRegisterValue(int regnum);
bool GetValue(const char* desc, int32_t* value);
bool GetVFPSingleValue(const char* desc, float* value);
@@ -168,6 +169,11 @@
}
+double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
+ return sim_->get_double_from_register_pair(regnum);
+}
+
+
double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
}
@@ -305,14 +311,22 @@
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
int32_t value;
float svalue;
double dvalue;
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
+ PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
+ i < 8 &&
+ (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
+ } else {
+ PrintF("\n");
+ }
}
for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
dvalue = GetVFPDoubleRegisterValue(i);
@@ -550,6 +564,7 @@
PrintF("print <register>\n");
PrintF(" print register content (alias 'p')\n");
PrintF(" use register name 'all' to print all registers\n");
+ PrintF(" add argument 'fp' to print register pair double values\n");
PrintF("printobject <register>\n");
PrintF(" print an object from a register (alias 'po')\n");
PrintF("flags\n");
@@ -873,6 +888,19 @@
}
+double Simulator::get_double_from_register_pair(int reg) {
+ ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer registers_[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return(dm_val);
+}
+
+
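get_double_from_register_pair above reassembles one IEEE-754 double from two consecutive 32-bit core registers, using memcpy rather than a pointer cast to stay clear of aliasing rules. A standalone sketch of the reinterpretation, assuming the little-endian word order the simulator uses:

#include <cstdint>
#include <cstring>

// Reassemble a double from an even/odd core register pair (r0/r1 style).
// Assumes little-endian word order, matching the simulated ARM EABI.
double DoubleFromRegisterPair(uint32_t lo, uint32_t hi) {
  uint32_t pair[2] = { lo, hi };
  double result;
  std::memcpy(&result, pair, sizeof(result));
  return result;
}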
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -938,12 +966,7 @@
// 2*sreg and 2*sreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
-#ifndef BIG_ENDIAN_FLOATING_POINT
memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-#else
- memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
-#endif
}
@@ -980,12 +1003,7 @@
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
-#ifdef BIG_ENDIAN_FLOATING_POINT
- memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
- memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
-#else
memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#endif
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
}
@@ -1504,36 +1522,34 @@
}
-// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instruction* instr, bool load) {
+void Simulator::ProcessPUW(Instruction* instr,
+ int num_regs,
+ int reg_size,
+ intptr_t* start_address,
+ intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
- int rlist = instr->RlistValue();
- int num_regs = count_bits(rlist);
-
- intptr_t start_address = 0;
- intptr_t end_address = 0;
switch (instr->PUField()) {
case da_x: {
UNIMPLEMENTED();
break;
}
case ia_x: {
- start_address = rn_val;
- end_address = rn_val + (num_regs * 4) - 4;
- rn_val = rn_val + (num_regs * 4);
+ *start_address = rn_val;
+ *end_address = rn_val + (num_regs * reg_size) - reg_size;
+ rn_val = rn_val + (num_regs * reg_size);
break;
}
case db_x: {
- start_address = rn_val - (num_regs * 4);
- end_address = rn_val - 4;
- rn_val = start_address;
+ *start_address = rn_val - (num_regs * reg_size);
+ *end_address = rn_val - reg_size;
+ rn_val = *start_address;
break;
}
case ib_x: {
- start_address = rn_val + 4;
- end_address = rn_val + (num_regs * 4);
- rn_val = end_address;
+ *start_address = rn_val + reg_size;
+ *end_address = rn_val + (num_regs * reg_size);
+ rn_val = *end_address;
break;
}
default: {
@@ -1544,6 +1560,17 @@
if (instr->HasW()) {
set_register(rn, rn_val);
}
+}
+
+// Addressing Mode 4 - Load and Store Multiple
+void Simulator::HandleRList(Instruction* instr, bool load) {
+ int rlist = instr->RlistValue();
+ int num_regs = count_bits(rlist);
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
+
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
int reg = 0;
while (rlist != 0) {
@@ -1562,6 +1589,57 @@
}
+// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
+void Simulator::HandleVList(Instruction* instr) {
+ VFPRegPrecision precision =
+ (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
+ int operand_size = (precision == kSinglePrecision) ? 4 : 8;
+
+ bool load = (instr->VLValue() == 0x1);
+
+ int vd;
+ int num_regs;
+ vd = instr->VFPDRegValue(precision);
+ if (precision == kSinglePrecision) {
+ num_regs = instr->Immed8Value();
+ } else {
+ num_regs = instr->Immed8Value() / 2;
+ }
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
+
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ for (int reg = vd; reg < vd + num_regs; reg++) {
+ if (precision == kSinglePrecision) {
+ if (load) {
+ set_s_register_from_sinteger(
+ reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+ } else {
+ WriteW(reinterpret_cast<int32_t>(address),
+ get_sinteger_from_s_register(reg), instr);
+ }
+ address += 1;
+ } else {
+ if (load) {
+ set_s_register_from_sinteger(
+ 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+ set_s_register_from_sinteger(
+ 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
+ } else {
+ WriteW(reinterpret_cast<int32_t>(address),
+ get_sinteger_from_s_register(2 * reg), instr);
+ WriteW(reinterpret_cast<int32_t>(address + 1),
+ get_sinteger_from_s_register(2 * reg + 1), instr);
+ }
+ address += 2;
+ }
+ }
+ ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+}
+
+
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
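ProcessPUW centralizes the P/U/W (pre-/post-index, up/down, writeback) address arithmetic so that HandleRList (4-byte elements) and the new HandleVList (4- or 8-byte elements) can share it. The three supported modes, sketched standalone in C++ (da_x is unimplemented in the simulator, as above):

#include <cstdint>

enum AddrMode { ia_x, db_x, ib_x };  // Subset handled by ProcessPUW.

// Compute the first/last element addresses and the written-back base for a
// multi-register transfer of num_regs elements of reg_size bytes each.
void ProcessPUWSketch(AddrMode mode, intptr_t rn_val, int num_regs,
                      int reg_size, intptr_t* start, intptr_t* end,
                      intptr_t* new_base) {
  switch (mode) {
    case ia_x:  // Increment after: base .. base + (n-1)*size.
      *start = rn_val;
      *end = rn_val + num_regs * reg_size - reg_size;
      *new_base = rn_val + num_regs * reg_size;
      break;
    case db_x:  // Decrement before: base - n*size .. base - size.
      *start = rn_val - num_regs * reg_size;
      *end = rn_val - reg_size;
      *new_base = *start;
      break;
    case ib_x:  // Increment before: base + size .. base + n*size.
      *start = rn_val + reg_size;
      *end = rn_val + num_regs * reg_size;
      *new_base = *end;
      break;
  }
}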
@@ -2945,9 +3023,17 @@
}
break;
}
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB:
+ // Load/store multiple single from memory: vldm/vstm.
+ HandleVList(instr);
+ break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
@@ -2994,9 +3080,14 @@
}
break;
}
+ case 0x4:
+ case 0x5:
+ case 0x9:
+ // Load/store multiple double from memory: vldm/vstm.
+ HandleVList(instr);
+ break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
} else {
UNIMPLEMENTED(); // Not used by V8.
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index b7b1b68..a16cae5 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -155,6 +155,7 @@
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@@ -236,7 +237,13 @@
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
+ void ProcessPUW(Instruction* instr,
+ int num_regs,
+ int operand_size,
+ intptr_t* start_address,
+ intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
+ void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 9936ac0..47d675b 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -953,7 +953,7 @@
Register fval,
Register scratch1,
Register scratch2) {
- if (masm->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
@@ -2048,7 +2048,7 @@
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (!CpuFeatures::IsSupported(VFP3)) {
return heap()->undefined_value();
}
@@ -3509,7 +3509,7 @@
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
- if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
@@ -3548,7 +3548,7 @@
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
- if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
@@ -3563,7 +3563,7 @@
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
@@ -3627,7 +3627,7 @@
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
@@ -3820,7 +3820,7 @@
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {
diff --git a/src/arm/virtual-frame-arm-inl.h b/src/arm/virtual-frame-arm-inl.h
deleted file mode 100644
index 6a7902a..0000000
--- a/src/arm/virtual-frame-arm-inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
-#define V8_VIRTUAL_FRAME_ARM_INL_H_
-
-#include "assembler-arm.h"
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
-// file if such a thing existed.
-MemOperand VirtualFrame::ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
-}
-
- // The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
- return ParameterAt(-1);
-}
-
-
-void VirtualFrame::Forget(int count) {
- SpillAll();
- LowerHeight(count);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_ARM_INL_H_
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
deleted file mode 100644
index a852d6e..0000000
--- a/src/arm/virtual-frame-arm.cc
+++ /dev/null
@@ -1,843 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToR1R0() {
- // Shuffle things around so the top of stack is in r0 and r1.
- MergeTOSTo(R0_R1_TOS);
- // Pop the two registers off the stack so they are detached from the frame.
- LowerHeight(2);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR1() {
- // Shuffle things around so the top of stack is only in r1.
- MergeTOSTo(R1_TOS);
- // Pop the register off the stack so it is detached from the frame.
- LowerHeight(1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR0() {
- // Shuffle things around so the top of stack only in r0.
- MergeTOSTo(R0_TOS);
- // Pop the register off the stack so it is detached from the frame.
- LowerHeight(1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
- if (Equals(expected)) return;
- ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
- expected->tos_known_smi_map_);
- ASSERT(expected->IsCompatibleWith(this));
- MergeTOSTo(expected->top_of_stack_state_, cond);
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
- if (Equals(expected)) return;
- tos_known_smi_map_ &= expected->tos_known_smi_map_;
- MergeTOSTo(expected->top_of_stack_state_, cond);
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
-#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
- switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
- case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
- __ pop(r0, cond);
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
- __ pop(r1, cond);
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
- __ push(r0, cond);
- break;
- case CASE_NUMBER(R0_TOS, R0_TOS):
- break;
- case CASE_NUMBER(R0_TOS, R1_TOS):
- __ mov(r1, r0, LeaveCC, cond);
- break;
- case CASE_NUMBER(R0_TOS, R0_R1_TOS):
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(R0_TOS, R1_R0_TOS):
- __ mov(r1, r0, LeaveCC, cond);
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
- __ push(r1, cond);
- break;
- case CASE_NUMBER(R1_TOS, R0_TOS):
- __ mov(r0, r1, LeaveCC, cond);
- break;
- case CASE_NUMBER(R1_TOS, R1_TOS):
- break;
- case CASE_NUMBER(R1_TOS, R0_R1_TOS):
- __ mov(r0, r1, LeaveCC, cond);
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(R1_TOS, R1_R0_TOS):
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
- __ Push(r1, r0, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R0_TOS):
- __ push(r1, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R1_TOS):
- __ push(r1, cond);
- __ mov(r1, r0, LeaveCC, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
- break;
- case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
- __ Swap(r0, r1, ip, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
- __ Push(r0, r1, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R0_TOS):
- __ push(r0, cond);
- __ mov(r0, r1, LeaveCC, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R1_TOS):
- __ push(r0, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
- __ Swap(r0, r1, ip, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
- break;
- default:
- UNREACHABLE();
-#undef CASE_NUMBER
- }
- // A conditional merge will be followed by a conditional branch and the
- // fall-through code will have an unchanged virtual frame state. If the
- // merge is unconditional ('al'ways) then it might be followed by a fall
- // through. We need to update the virtual frame state to match the code we
- // are falling into. The final case is an unconditional merge followed by an
- // unconditional branch, in which case it doesn't matter what we do to the
- // virtual frame state, because the virtual frame will be invalidated.
- if (cond == al) {
- top_of_stack_state_ = expected_top_of_stack_state;
- }
-}
-
-
-void VirtualFrame::Enter() {
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- // Verify that r1 contains a JS function. The following code relies
- // on r2 being available for use.
- if (FLAG_debug_code) {
- Label map_check, done;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &map_check);
- __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
- __ bind(&map_check);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(eq, &done);
- __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
- __ bind(&done);
- }
-#endif // DEBUG
-
- // We are about to push four values to the frame.
- Adjust(4);
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Drop the execution stack down to the frame pointer and restore the caller
- // frame pointer and return address.
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- Adjust(count);
- // Initialize stack slots with 'undefined' value.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- if (count < kLocalVarBound) {
- // For less locals the unrolled loop is more compact.
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- __ mov(r1, Operand(count));
- __ bind(&alloc_locals_loop);
- __ push(ip);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(ne, &alloc_locals_loop);
- }
- } else {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
- // Check the stack for overflow or a break request.
- masm()->cmp(sp, Operand(r2));
- StackCheckStub stub;
- // Call the stub if lower.
- masm()->mov(ip,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- masm()->Call(ip, lo);
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- // Grow the expression stack by handler size less one (the return
- // address in lr is already counted by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
- // InvokeFunction requires function in r1.
- PopToR1();
- SpillAll();
-
- // +1 for receiver.
- Forget(arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION);
- // Restore the context.
- __ ldr(cp, Context());
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- SpillAll();
- Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- SpillAll();
- Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- int arg_count) {
- Forget(arg_count);
- __ InvokeBuiltin(id, flags);
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- PopToR0();
- SpillAll();
- __ mov(r2, Operand(name));
- CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
- PopToR0();
- RelocInfo::Mode mode;
- if (is_contextual) {
- SpillAll();
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- EmitPop(r1);
- SpillAll();
- mode = RelocInfo::CODE_TARGET;
- }
- __ mov(r2, Operand(name));
- CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- PopToR1R0();
- SpillAll();
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- PopToR1R0();
- SpillAll();
- EmitPop(r2);
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- case Code::FUNCTION:
- break;
- case Code::KEYED_LOAD_IC:
- case Code::LOAD_IC:
- case Code::KEYED_STORE_IC:
- case Code::STORE_IC:
- ASSERT(dropped_args == 0);
- break;
- case Code::BUILTIN:
- ASSERT(*code == Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- break;
- default:
- UNREACHABLE();
- break;
- }
- Forget(dropped_args);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
-// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
-const bool VirtualFrame::kR0InUse[TOS_STATES] =
- { false, true, false, true, true };
-const bool VirtualFrame::kR1InUse[TOS_STATES] =
- { false, false, true, true, true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
- { 0, 1, 1, 2, 2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
- { r0, r0, r1, r1, r0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
- { r0, r0, r1, r0, r1 };
-const Register VirtualFrame::kAllocatedRegisters[
- VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
-// Popping is done by the transition implied by kStateAfterPop. Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
- { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush. Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
- { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
-
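// A worked example of the two tables above (illustrative only): starting
// from a spilled frame, two pushes and one pop walk the state machine as
//
//   NO_TOS_REGISTERS --push--> R0_TOS      (TOS cached in r0)
//   R0_TOS           --push--> R1_R0_TOS   (TOS in r1, next-to-top in r0)
//   R1_R0_TOS        --pop-->  R0_TOS      (r1 discarded, r0 is TOS again)
//
// so no memory traffic is emitted until more than kMaxTOSRegisters (two)
// elements would have to be register-resident at once.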
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- // Discard elements from the virtual frame and free any registers.
- int num_virtual_elements = kVirtualElements[top_of_stack_state_];
- while (num_virtual_elements > 0) {
- Pop();
- num_virtual_elements--;
- count--;
- if (count == 0) return;
- }
- if (count == 0) return;
- __ add(sp, sp, Operand(count * kPointerSize));
- LowerHeight(count);
-}
-
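// A worked trace of Drop (illustrative): in state R1_R0_TOS, Drop(3) first
// discards the two register-cached elements for free via kStateAfterPop
// (R1_R0_TOS -> R0_TOS -> NO_TOS_REGISTERS) and only then emits a single
// "add sp, sp, #kPointerSize" for the one element that lives in memory.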
-
-void VirtualFrame::Pop() {
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- __ add(sp, sp, Operand(kPointerSize));
- } else {
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- LowerHeight(1);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- __ pop(reg);
- } else {
- __ mov(reg, kTopRegister[top_of_stack_state_]);
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- LowerHeight(1);
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR0() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- break;
- case R0_TOS:
- __ push(r0);
- break;
- case R1_TOS:
- __ push(r1);
- __ mov(r0, r1);
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- __ mov(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r1, MemOperand(sp, 0));
- break;
- case R0_TOS:
- __ push(r0);
- __ mov(r1, r0);
- break;
- case R1_TOS:
- __ push(r1);
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- __ mov(r1, r0);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1R0() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r1, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R0_TOS:
- __ push(r0);
- __ mov(r1, r0);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R1_TOS:
- __ push(r1);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- __ Swap(r0, r1, ip);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-Register VirtualFrame::Peek() {
- AssertIsNotSpilled();
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- Register answer = kTopRegister[top_of_stack_state_];
- __ pop(answer);
- return answer;
- } else {
- return kTopRegister[top_of_stack_state_];
- }
-}
-
-
-Register VirtualFrame::Peek2() {
- AssertIsNotSpilled();
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- case R0_TOS:
- case R0_R1_TOS:
- MergeTOSTo(R0_R1_TOS);
- return r1;
- case R1_TOS:
- case R1_R0_TOS:
- MergeTOSTo(R1_R0_TOS);
- return r0;
- default:
- UNREACHABLE();
- return no_reg;
- }
-}
-
-
-void VirtualFrame::Dup() {
- if (SpilledScope::is_spilled()) {
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- } else {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- top_of_stack_state_ = R0_TOS;
- break;
- case R0_TOS:
- __ mov(r1, r0);
- // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ mov(r0, r1);
- // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_R1_TOS:
- __ push(r1);
- __ mov(r1, r0);
- // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ push(r0);
- __ mov(r0, r1);
- // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- default:
- UNREACHABLE();
- }
- }
- RaiseHeight(1, tos_known_smi_map_ & 1);
-}
-
-
-void VirtualFrame::Dup2() {
- if (SpilledScope::is_spilled()) {
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- } else {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- __ ldr(r1, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_TOS:
- __ push(r0);
- __ ldr(r1, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ push(r1);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R1_R0_TOS;
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- top_of_stack_state_ = R1_R0_TOS;
- break;
- default:
- UNREACHABLE();
- }
- }
- RaiseHeight(2, tos_known_smi_map_ & 3);
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
- ASSERT(but_not_to_this_one.is(r0) ||
- but_not_to_this_one.is(r1) ||
- but_not_to_this_one.is(no_reg));
- LowerHeight(1);
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- if (but_not_to_this_one.is(r0)) {
- __ pop(r1);
- return r1;
- } else {
- __ pop(r0);
- return r0;
- }
- } else {
- Register answer = kTopRegister[top_of_stack_state_];
- ASSERT(!answer.is(but_not_to_this_one));
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- return answer;
- }
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
- if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
- __ push(kBottomRegister[top_of_stack_state_]);
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
-}
-
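// Note on the push-then-pop transition above (illustrative): composing the
// two tables computes "same frame, bottom register flushed". From R1_R0_TOS,
// kStateAfterPush gives R0_R1_TOS and kStateAfterPop then gives R1_TOS: r0,
// the bottom register, has been pushed to memory and only r1 stays cached.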
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (reg.is(cp)) {
- // If we are pushing cp then we are about to make a call and things have to
- // be pushed to the physical stack. There's nothing to be gained by moving
- // to a TOS register and then pushing that; we might as well push to the
- // physical stack immediately.
- MergeTOSTo(NO_TOS_REGISTERS);
- __ push(reg);
- return;
- }
- if (SpilledScope::is_spilled()) {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- __ push(reg);
- return;
- }
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- if (reg.is(r0)) {
- top_of_stack_state_ = R0_TOS;
- return;
- }
- if (reg.is(r1)) {
- top_of_stack_state_ = R1_TOS;
- return;
- }
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- Register dest = kTopRegister[top_of_stack_state_];
- __ Move(dest, reg);
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
- if (this_far_down < kTOSKnownSmiMapSize) {
- tos_known_smi_map_ &= ~(1 << this_far_down);
- }
- if (this_far_down == 0) {
- Pop();
- Register dest = GetTOSRegister();
- if (dest.is(reg)) {
- // We already popped one item off the top of the stack. If the only
- // free register is the one we were asked to push then we have been
- // asked to push a register that was already in use, which cannot
- // happen. It therefore follows that there are two free TOS registers:
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- dest = dest.is(r0) ? r1 : r0;
- }
- __ mov(dest, reg);
- EmitPush(dest);
- } else if (this_far_down == 1) {
- int virtual_elements = kVirtualElements[top_of_stack_state_];
- if (virtual_elements < 2) {
- __ str(reg, ElementAt(this_far_down));
- } else {
- ASSERT(virtual_elements == 2);
- ASSERT(!reg.is(r0));
- ASSERT(!reg.is(r1));
- Register dest = kBottomRegister[top_of_stack_state_];
- __ mov(dest, reg);
- }
- } else {
- ASSERT(this_far_down >= 2);
- ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
- __ str(reg, ElementAt(this_far_down));
- }
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
- if (SpilledScope::is_spilled()) return r0;
-
- EnsureOneFreeTOSRegister();
- return kTopRegister[kStateAfterPush[top_of_stack_state_]];
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (SpilledScope::is_spilled()) {
- __ mov(r0, operand);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ mov(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (SpilledScope::is_spilled()) {
- __ ldr(r0, operand);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ ldr(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- RaiseHeight(1, 0);
- if (SpilledScope::is_spilled()) {
- __ LoadRoot(r0, index);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ LoadRoot(kTopRegister[top_of_stack_state_], index);
-}
-
-
-void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
- ASSERT(SpilledScope::is_spilled());
- Adjust(count);
- __ stm(db_w, sp, src_regs);
-}
-
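// Hypothetical call site for EmitPushMultiple (a sketch, not from this
// file): pushing r0 and r1 with a single stm. Since stm db_w stores the
// lowest-numbered register at the lowest address, r0's value becomes TOS:
//
//   frame->EmitPushMultiple(2, r0.bit() | r1.bit());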
-
-void VirtualFrame::SpillAll() {
- switch (top_of_stack_state_) {
- case R1_R0_TOS:
- masm()->push(r0);
- // Fall through.
- case R1_TOS:
- masm()->push(r1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
- break;
- case R0_R1_TOS:
- masm()->push(r1);
- // Fall through.
- case R0_TOS:
- masm()->push(r0);
- top_of_stack_state_ = NO_TOS_REGISTERS;
- // Fall through.
- case NO_TOS_REGISTERS:
- break;
- default:
- UNREACHABLE();
- break;
- }
- ASSERT(register_allocation_map_ == 0); // Not yet implemented.
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
deleted file mode 100644
index 6d67e70..0000000
--- a/src/arm/virtual-frame-arm.h
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
-#define V8_ARM_VIRTUAL_FRAME_ARM_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
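// A hypothetical sketch of how a code generator might drive this interface;
// the add instruction is an assumed combining step, not part of this class:
//
//   frame->EmitPush(r0);                       // left operand becomes TOS
//   frame->EmitPush(r1);                       // right operand becomes TOS
//   Register rhs = frame->PopToRegister();
//   Register lhs = frame->PopToRegister(rhs);  // guaranteed distinct from rhs
//   __ add(r0, lhs, Operand(rhs));             // combine the two values
//   frame->EmitPush(r0);                       // push the result
//
// In an unspilled frame this sequence emits no memory traffic at all: both
// operands stay in the r0/r1 top-of-stack cache.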
-class VirtualFrame : public ZoneObject {
- public:
- class RegisterAllocationScope;
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, and keeps it spilled.
- class SpilledScope BASE_EMBEDDED {
- public:
- explicit SpilledScope(VirtualFrame* frame)
- : old_is_spilled_(
- Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
- if (frame != NULL) {
- if (!old_is_spilled_) {
- frame->SpillAll();
- } else {
- frame->AssertIsSpilled();
- }
- }
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
- }
- ~SpilledScope() {
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
- old_is_spilled_);
- }
- static bool is_spilled() {
- return Isolate::Current()->is_virtual_frame_in_spilled_scope();
- }
-
- private:
- int old_is_spilled_;
-
- SpilledScope() { }
-
- friend class RegisterAllocationScope;
- };
-
- class RegisterAllocationScope BASE_EMBEDDED {
- public:
- // A utility class to introduce a scope where the virtual frame
- // is not spilled, i.e. where register allocation occurs. Eventually
- // when RegisterAllocationScope is ubiquitous it can be removed
- // along with the (by then unused) SpilledScope class.
- inline explicit RegisterAllocationScope(CodeGenerator* cgen);
- inline ~RegisterAllocationScope();
-
- private:
- CodeGenerator* cgen_;
- bool old_is_spilled_;
-
- RegisterAllocationScope() { }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct an invalid virtual frame, used by JumpTargets.
- inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- inline CodeGenerator* cgen() const;
- inline MacroAssembler* masm();
-
- // The number of elements on the virtual frame.
- int element_count() const { return element_count_; }
-
- // The height of the virtual expression stack.
- inline int height() const;
-
- bool is_used(int num) {
- switch (num) {
- case 0: { // r0.
- return kR0InUse[top_of_stack_state_];
- }
- case 1: { // r1.
- return kR1InUse[top_of_stack_state_];
- }
- case 2:
- case 3:
- case 4:
- case 5:
- case 6: { // r2 to r6.
- ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
- ASSERT(num >= kFirstAllocatedRegister);
- if ((register_allocation_map_ &
- (1 << (num - kFirstAllocatedRegister))) == 0) {
- return false;
- } else {
- return true;
- }
- }
- default: {
- ASSERT(num < kFirstAllocatedRegister ||
- num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
- return false;
- }
- }
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (e.g., the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (e.g.,
- // the frame after a runtime call). No code is emitted except to bring the
- // frame to a spilled state.
- void Forget(int count);
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- void AssertIsSpilled() const {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- ASSERT(register_allocation_map_ == 0);
- }
-
- void AssertIsNotSpilled() {
- ASSERT(!SpilledScope::is_spilled());
- }
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- UNIMPLEMENTED();
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (i.e., they all have frame-external references). Unimplemented.
- Register SpillAnyRegister();
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected, Condition cond = al);
- void MergeTo(const VirtualFrame* expected, Condition cond = al);
-
- // Checks whether this frame can be branched to by the other frame.
- bool IsCompatibleWith(const VirtualFrame* other) const {
- return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
- }
-
- inline void ForgetTypeInfo() {
- tos_known_smi_map_ = 0;
- }
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by discarding the elements cached
- // in the virtual frame. This avoids generating unnecessary merge code when
- // jumping to the shared return site. No spill code is emitted. The value
- // to return should be in r0.
- inline void PrepareForReturn();
-
- // Number of local variables at or above which we use a loop for allocating.
- static const int kLocalVarBound = 5;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() {
- AssertIsSpilled();
- return MemOperand(sp, 0);
- }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- int adjusted_index = index - kVirtualElements[top_of_stack_state_];
- ASSERT(adjusted_index >= 0);
- return MemOperand(sp, adjusted_index * kPointerSize);
- }
-
- bool KnownSmiAt(int index) {
- if (index >= kTOSKnownSmiMapSize) return false;
- return (tos_known_smi_map_ & (1 << index)) != 0;
- }
-
- // A frame-allocated local as an assembly operand.
- inline MemOperand LocalAt(int index);
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
- // A parameter as an assembly operand.
- inline MemOperand ParameterAt(int index);
-
- // The receiver frame slot.
- inline MemOperand Receiver();
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline void CallStub(CodeStub* stub, int arg_count);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- void CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(const Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- int arg_count);
-
- // Call load IC. Receiver is on the stack and is consumed. Result is returned
- // in r0.
- void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are consumed.
- // Result is returned in r0.
- void CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
- // Result is returned in r0.
- void CallKeyedLoadIC();
-
- // Call keyed store IC. Value, key and receiver are on the stack. All three
- // are consumed. Result is returned in r0.
- void CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments to the IC stub are implicit,
- // and depend on the type of IC stub.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Pop an element from the top of the expression stack. Discards
- // the result.
- void Pop();
-
- // Pop an element from the top of the expression stack. The register
- // returned will be one normally used for top-of-stack register allocation,
- // so you can't hold on to it if you push on the stack.
- Register PopToRegister(Register but_not_to_this_one = no_reg);
-
- // Look at the top of the stack. The register returned is aliased and
- // must be copied to a scratch register before modification.
- Register Peek();
-
- // Look at the value beneath the top of the stack. The register returned is
- // aliased and must be copied to a scratch register before modification.
- Register Peek2();
-
- // Duplicate the top of stack.
- void Dup();
-
- // Duplicate the two elements on top of stack.
- void Dup2();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r0.
- void SpillAllButCopyTOSToR0();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r1.
- void SpillAllButCopyTOSToR1();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r1
- // and the next value on the stack in r0.
- void SpillAllButCopyTOSToR1R0();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
-
- // Takes the top two elements and puts them in r0 (top element) and r1
- // (second element).
- void PopToR1R0();
-
- // Takes the top element and puts it in r1.
- void PopToR1();
-
- // Takes the top element and puts it in r0.
- void PopToR0();
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPushRoot(Heap::RootListIndex index);
-
- // Overwrite the nth element on the stack. If the nth position is in a
- // register then this turns into a mov, otherwise an str. Afterwards
- // you can still use the register even if it is a register that can be
- // used for TOS (r0 or r1).
- void SetElementAt(Register reg, int this_far_down);
-
- // Get a register which is free and which must be immediately used to
- // push on the top of the stack.
- Register GetTOSRegister();
-
- // Push multiple registers on the stack and the virtual frame.
- // Registers are selected by setting bits in src_regs and
- // are pushed in decreasing order: r15 .. r0.
- void EmitPushMultiple(int count, int src_regs);
-
- static Register scratch0() { return r7; }
- static Register scratch1() { return r9; }
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- // 5 states for the top of stack, which can be in memory or in r0 and r1.
- enum TopOfStack {
- NO_TOS_REGISTERS,
- R0_TOS,
- R1_TOS,
- R1_R0_TOS,
- R0_R1_TOS,
- TOS_STATES
- };
-
- static const int kMaxTOSRegisters = 2;
-
- static const bool kR0InUse[TOS_STATES];
- static const bool kR1InUse[TOS_STATES];
- static const int kVirtualElements[TOS_STATES];
- static const TopOfStack kStateAfterPop[TOS_STATES];
- static const TopOfStack kStateAfterPush[TOS_STATES];
- static const Register kTopRegister[TOS_STATES];
- static const Register kBottomRegister[TOS_STATES];
-
- // We allocate up to 5 locals in registers.
- static const int kNumberOfAllocatedRegisters = 5;
- // r2 to r6 are allocated to locals.
- static const int kFirstAllocatedRegister = 2;
-
- static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
- static Register AllocatedRegister(int r) {
- ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
- return kAllocatedRegisters[r];
- }
-
- // The number of elements on the stack frame.
- int element_count_;
- TopOfStack top_of_stack_state_:3;
- int register_allocation_map_:kNumberOfAllocatedRegisters;
- static const int kTOSKnownSmiMapSize = 4;
- unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register). For now, since everything is in memory, it is given
- // by the number of elements on the not-very-virtual stack frame.
- int stack_pointer() { return element_count_ - 1; }
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count() const;
- inline int local_count() const;
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- inline int frame_pointer() const;
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- inline int context_index();
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- inline int function_index();
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- inline int local0_index() const;
-
- // The index of the base of the expression stack.
- inline int expression_base_index() const;
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- inline int fp_relative(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // If all top-of-stack registers are in use then the lowest one is pushed
- // onto the physical stack and made free.
- void EnsureOneFreeTOSRegister();
-
- // Emit instructions to get the top of stack state from where we are to where
- // we want to be.
- void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
-
- inline bool Equals(const VirtualFrame* other);
-
- inline void LowerHeight(int count) {
- element_count_ -= count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = 0;
- } else {
- tos_known_smi_map_ >>= count;
- }
- }
-
- inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
- ASSERT(count >= 32 || known_smi_map < (1u << count));
- element_count_ += count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = known_smi_map;
- } else {
- tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
- }
- }
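// Worked example of the smi-map bookkeeping above (illustrative): with
// tos_known_smi_map_ == 0b01 (only the current TOS known to be a smi),
// RaiseHeight(1, 1) yields (0b01 << 1) | 1 == 0b011, and a later
// LowerHeight(1) shifts back to 0b01. Since the map is only
// kTOSKnownSmiMapSize (four) bits wide, smi-ness of deeper elements is
// simply forgotten rather than tracked.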
-
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_