Update V8 to r3431 as required by WebKit r51976.
Change-Id: I567392c3f8c0a0d5201a4249611ac4ccf468cd5b
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 48cc090..5f47cb7 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -85,7 +85,7 @@
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
@@ -245,6 +245,12 @@
}
+void Assembler::set_target_at(Address constant_pool_entry,
+ Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
void Assembler::set_target_address_at(Address pc, Address target) {
Memory::Address_at(target_address_address_at(pc)) = target;
// Intuitively, we would think it is necessary to flush the instruction cache
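A note on the set_target_at helper added above: on ARM, call and branch targets live in the constant pool rather than in the instruction stream, so retargeting a call is a plain data store into the pool slot. A minimal standalone sketch of the idea (Memory::Address_at is from the patch; the PatchPoolEntry wrapper and the Address typedef here are illustrative):

    #include <cstdint>
    typedef uint8_t* Address;  // stand-in for V8's Address typedef

    // Patching a constant-pool entry is a data write; the pc-relative ldr
    // that consumes it reads through the data cache, so unlike patching an
    // actual instruction no instruction-cache flush is required.
    inline void PatchPoolEntry(Address constant_pool_entry, Address target) {
      *reinterpret_cast<Address*>(constant_pool_entry) = target;
    }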
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index bc3b8e6..d924728 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -42,6 +42,34 @@
namespace v8 {
namespace internal {
+// Safe default is no features.
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__)
+ // For the simulator=arm build, always use VFP since the arm simulator has
+ // VFP support.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+
+ if (OS::ArmCpuHasFeature(VFP3)) {
+ // This implementation also sets the VFP flags if
+ // runtime detection of VFP returns true.
+ supported_ |= 1u << VFP3;
+ found_by_runtime_probing_ |= 1u << VFP3;
+ }
+#endif
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
@@ -84,6 +112,57 @@
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+Register s0 = { 0 };
+Register s1 = { 1 };
+Register s2 = { 2 };
+Register s3 = { 3 };
+Register s4 = { 4 };
+Register s5 = { 5 };
+Register s6 = { 6 };
+Register s7 = { 7 };
+Register s8 = { 8 };
+Register s9 = { 9 };
+Register s10 = { 10 };
+Register s11 = { 11 };
+Register s12 = { 12 };
+Register s13 = { 13 };
+Register s14 = { 14 };
+Register s15 = { 15 };
+Register s16 = { 16 };
+Register s17 = { 17 };
+Register s18 = { 18 };
+Register s19 = { 19 };
+Register s20 = { 20 };
+Register s21 = { 21 };
+Register s22 = { 22 };
+Register s23 = { 23 };
+Register s24 = { 24 };
+Register s25 = { 25 };
+Register s26 = { 26 };
+Register s27 = { 27 };
+Register s28 = { 28 };
+Register s29 = { 29 };
+Register s30 = { 30 };
+Register s31 = { 31 };
+
+Register d0 = { 0 };
+Register d1 = { 1 };
+Register d2 = { 2 };
+Register d3 = { 3 };
+Register d4 = { 4 };
+Register d5 = { 5 };
+Register d6 = { 6 };
+Register d7 = { 7 };
+Register d8 = { 8 };
+Register d9 = { 9 };
+Register d10 = { 10 };
+Register d11 = { 11 };
+Register d12 = { 12 };
+Register d13 = { 13 };
+Register d14 = { 14 };
+Register d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -203,10 +282,14 @@
B4 = 1 << 4,
B5 = 1 << 5,
+ B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
+ B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
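The Bn constants extended above are single set bits; the emit() calls later in this patch multiply small field values by them to position each field within the 32-bit instruction word. A quick standalone illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t B8 = 1 << 8;
      // 0xB*B8 places binary 1011 into bits 11..8, which is how the VFP
      // emitters below encode the 0b1011 coprocessor field.
      assert(0xB * B8 == 0xB00);
      return 0;
    }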
@@ -523,6 +606,11 @@
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
return Serializer::enabled();
} else if (rmode == RelocInfo::NONE) {
return false;
@@ -1282,6 +1370,187 @@
}
+// Support for VFP.
+void Assembler::fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dm = <Rt,Rt2>.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src1.is(pc) && !src2.is(pc));
+ emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
+ src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+}
+
+
+void Assembler::fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // <Rt,Rt2> = Dm.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
+ dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+}
+
+
+void Assembler::fmsr(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Sn = Rt.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src.is(pc));
+ emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
+ src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+}
+
+
+void Assembler::fmrs(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Rt = Sn.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst.is(pc));
+ emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
+ dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+}
+
+
+void Assembler::fsitod(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+  // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
+  // Instruction details available in ARM DDI 0406A, A8-576.
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
+ dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
+ (0x1 & src.code())*B5 | (src.code() >> 1));
+}
+
+
+void Assembler::ftosid(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+  // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
+ 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
+ 0x5*B9 | B8 | B7 | B6 | src.code());
+}
+
+
+void Assembler::faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = faddd(Dn, Dm) double precision floating point addition.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-536.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fdivd(Dn, Dm) double precision floating point division.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-584.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fcmp(const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, Dm) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmrs(Register dst, Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-652.
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xF*B20 | B16 |
+ dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
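As a sanity check on the encodings above (a worked example, not part of the patch): fmdrr(d7, r2, r3) with the default al condition should assemble to the ARM instruction vmov d7, r2, r3.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t al = 0xEu << 28;  // the "always" condition
      // cond | 0xC*B24 | B22 | src2*B16 | src1*B12 | 0xB*B8 | B4 | dst
      uint32_t instr = al | 0xCu << 24 | 1u << 22 | 3u << 16 | 2u << 12 |
                       0xBu << 8 | 1u << 4 | 7u;
      assert(instr == 0xEC432B17u);  // vmov d7, r2, r3
      return 0;
    }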
// Pseudo instructions
void Assembler::lea(Register dst,
const MemOperand& x,
@@ -1311,6 +1580,18 @@
}
+bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
+ uint32_t dummy1;
+ uint32_t dummy2;
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
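ImmediateFitsAddrMode1Instruction above defers to fits_shifter; the rule being checked is the standard ARM one: a mode-1 immediate is an 8-bit value rotated right by an even amount. A self-contained sketch of that predicate (illustrative, not the fits_shifter implementation):

    #include <cstdint>

    // True if imm is expressible as ROR(byte, 2k): rotating imm left by the
    // same even amount must recover a value that fits in 8 bits.
    static bool FitsAddrMode1(uint32_t imm) {
      for (int rot = 0; rot < 32; rot += 2) {
        uint32_t v = rot == 0 ? imm : (imm << rot) | (imm >> (32 - rot));
        if (v <= 0xFFu) return true;
      }
      return false;
    }
    // FitsAddrMode1(0xFF) and FitsAddrMode1(0x3FC) hold; FitsAddrMode1(0x101)
    // does not, which is exactly when GenCode() below counts an extra mov in
    // the return sequence.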
// Debugging
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
@@ -1429,10 +1710,15 @@
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d1df08c..86bc18a 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
+#include "serialize.h"
namespace v8 {
namespace internal {
@@ -102,6 +103,57 @@
extern Register lr;
extern Register pc;
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:s(N+1)" (N even) is the same as "d(N/2)".
+extern Register s0;
+extern Register s1;
+extern Register s2;
+extern Register s3;
+extern Register s4;
+extern Register s5;
+extern Register s6;
+extern Register s7;
+extern Register s8;
+extern Register s9;
+extern Register s10;
+extern Register s11;
+extern Register s12;
+extern Register s13;
+extern Register s14;
+extern Register s15;
+extern Register s16;
+extern Register s17;
+extern Register s18;
+extern Register s19;
+extern Register s20;
+extern Register s21;
+extern Register s22;
+extern Register s23;
+extern Register s24;
+extern Register s25;
+extern Register s26;
+extern Register s27;
+extern Register s28;
+extern Register s29;
+extern Register s30;
+extern Register s31;
+
+extern Register d0;
+extern Register d1;
+extern Register d2;
+extern Register d3;
+extern Register d4;
+extern Register d5;
+extern Register d6;
+extern Register d7;
+extern Register d8;
+extern Register d9;
+extern Register d10;
+extern Register d11;
+extern Register d12;
+extern Register d13;
+extern Register d14;
+extern Register d15;
// Coprocessor register
struct CRegister {
@@ -372,6 +424,51 @@
friend class Assembler;
};
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ return (enabled_ & (1u << f)) != 0;
+ }
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(CpuFeature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ private:
+ static unsigned supported_;
+ static unsigned enabled_;
+ static unsigned found_by_runtime_probing_;
+};
+
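A minimal usage sketch for the class above, assuming Probe() has already run at VM startup (the GenerateVfpAdd wrapper is hypothetical; CpuFeatures, faddd and the d registers are from this patch):

    void GenerateVfpAdd(Assembler* assm) {
      if (CpuFeatures::IsSupported(VFP3)) {
        // In DEBUG builds the Scope flips the enabled_ bit so the
        // ASSERT(CpuFeatures::IsEnabled(VFP3)) in each VFP emitter passes.
        CpuFeatures::Scope scope(VFP3);
        assm->faddd(d5, d6, d7);
      }
      // Otherwise fall back to the library-call path, as codegen-arm.cc does.
    }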
typedef int32_t Instr;
@@ -437,6 +534,23 @@
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+  inline static void set_target_at(Address constant_pool_entry,
+                                   Address target);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address constant_pool_entry,
+ Address target) {
+ set_target_at(constant_pool_entry, target);
+ }
+
+ // Here we are patching the address in the constant pool, not the actual call
+ // instruction. The address in the constant pool is the same size as a
+ // pointer.
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -452,6 +566,7 @@
// register.
static const int kPcLoadDelta = 8;
+ static const int kJSReturnSequenceLength = 4;
// ---------------------------------------------------------------------------
// Code generation
@@ -638,6 +753,66 @@
void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
+ // Support for VFP.
+ // All these APIs support S0 to S31 and D0 to D15.
+  // Currently these APIs do not support the extended D registers, i.e.,
+  // D16 to D31. However, some simple modifications would allow them to.
+
+ void fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmsr(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrs(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsitod(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void ftosid(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+
+ void faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fcmp(const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void vmrs(const Register dst,
+ const Condition cond = al);
+
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
@@ -665,6 +840,13 @@
return (pc_offset() - l->pos()) / kInstrSize;
}
+ // Check whether an immediate fits an addressing mode 1 instruction.
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
// Debugging
// Mark address of the ExitJSFrame code.
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d7afb37..5389a3c 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -284,7 +284,7 @@
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@@ -949,6 +949,8 @@
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1027,44 +1029,24 @@
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
- Label no_preemption, retry_preemption;
- __ bind(&retry_preemption);
- ExternalReference stack_guard_limit_address =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r2, Operand(stack_guard_limit_address));
- __ ldr(r2, MemOperand(r2));
- __ cmp(sp, r2);
- __ b(hi, &no_preemption);
-
- // We have encountered a preemption or stack overflow already before we push
- // the array contents. Save r0 which is the Smi-tagged length of the array.
- __ push(r0);
-
- // Runtime routines expect at least one argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kStackGuard, 1);
-
- // Since we returned, it wasn't a stack overflow. Restore r0 and try again.
- __ pop(r0);
- __ b(&retry_preemption);
-
- __ bind(&no_preemption);
-
- // Eagerly check for stack-overflow before starting to push the arguments.
- // r0: number of arguments.
- // r2: stack limit.
+  // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already be overflowed
+ // here which will cause r2 to become negative.
__ sub(r2, sp, r2);
-
+ // Check if the arguments will overflow the stack.
__ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(hi, &okay);
+ __ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+ // End of stack check.
// Push current limit and index.
__ bind(&okay);
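The change from the unsigned hi branch to the signed gt branch above matters because the subtraction can underflow: if the stack has already overflowed, r2 = sp - limit is negative. A standalone illustration of the failure mode:

    #include <cstdint>

    // space_left = sp - real_stack_limit may already be negative.
    bool ArgsFitSigned(int32_t space_left, int32_t args_bytes) {
      return space_left > args_bytes;  // the new "gt" branch: correct
    }
    bool ArgsFitUnsigned(uint32_t space_left, uint32_t args_bytes) {
      // The old "hi" branch: a negative space_left reinterpreted as unsigned
      // looks huge, so an already-overflowed stack would pass the check.
      return space_left > args_bytes;
    }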
@@ -1107,6 +1089,8 @@
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 9ff02cb..749f32d 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -35,18 +35,15 @@
#define __ ACCESS_MASM(masm_)
void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control) {
- LoadCondition(expression, typeof_state, true_target, false_target,
- force_control);
+ LoadCondition(expression, true_target, false_target, force_control);
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
- Load(expression, typeof_state);
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ Load(expression);
}
@@ -60,8 +57,8 @@
}
-void Reference::GetValueAndSpill(TypeofState typeof_state) {
- GetValue(typeof_state);
+void Reference::GetValueAndSpill() {
+ GetValue();
}
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 47f0e96..7c0b0c6 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
@@ -92,7 +93,6 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -101,11 +101,9 @@
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target)
: owner_(owner),
- typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -144,6 +142,9 @@
// cp: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
+ // Record the position for debugging purposes.
+ CodeForFunctionPosition(fun);
+
ZoneList<Statement*>* body = fun->body();
// Initialize state.
@@ -322,18 +323,32 @@
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
// Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+  // can be encoded in the instruction and which immediate values require
+ // use of an additional instruction for moving the immediate to a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
@@ -442,14 +457,13 @@
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
ASSERT(!has_cc());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, true_target, false_target);
+ { CodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -479,13 +493,13 @@
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
JumpTarget true_target;
JumpTarget false_target;
- LoadCondition(x, typeof_state, &true_target, &false_target, false);
+ LoadCondition(expr, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
@@ -552,24 +566,27 @@
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
VirtualFrame::SpilledScope spilled_scope;
- Variable* variable = x->AsVariableProxy()->AsVariable();
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- LoadAndSpill(&property);
+ Reference ref(this, &property);
+ ref.GetValueAndSpill();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
+ frame_->SpillAll();
} else {
- LoadAndSpill(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ LoadAndSpill(expr);
}
}
@@ -1066,27 +1083,6 @@
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
@@ -1122,22 +1118,20 @@
void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope;
- if (FLAG_check_stack) {
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- // Put the lr setup instruction in the delay slot. kInstrSize is added to
- // the implicit 8 byte offset that always applies to operations with pc and
- // gives a return address 12 bytes down.
- masm_->add(lr, pc, Operand(Assembler::kInstrSize));
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ Comment cmnt(masm_, "[ check stack");
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ // Put the lr setup instruction in the delay slot. kInstrSize is added to
+ // the implicit 8 byte offset that always applies to operations with pc and
+ // gives a return address 12 bytes down.
+ masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+ masm_->cmp(sp, Operand(ip));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm_->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
@@ -1299,8 +1293,7 @@
JumpTarget then;
JumpTarget else_;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
@@ -1323,8 +1316,7 @@
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &exit, true);
+ LoadConditionAndSpill(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
@@ -1339,8 +1331,7 @@
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &else_, true);
+ LoadConditionAndSpill(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
@@ -1354,8 +1345,7 @@
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &exit, false);
+ LoadConditionAndSpill(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1570,7 +1560,7 @@
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
- // Compile the test.
+ // Compile the test.
switch (info) {
case ALWAYS_TRUE:
// If control can fall off the end of the body, jump back to the
@@ -1593,8 +1583,9 @@
node->continue_target()->Bind();
}
if (has_valid_frame()) {
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
+      // An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -1633,8 +1624,7 @@
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -1693,8 +1683,7 @@
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -1780,25 +1769,81 @@
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
- frame_->EmitPush(r0); // duplicate the object being enumerated
- frame_->EmitPush(r0);
+ // r0: value to be iterated over
+ frame_->EmitPush(r0); // Push the object being iterated over.
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ mov(r1, Operand(r0));
+ loop.Bind();
+ // Check that there are no elements.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r3 for the subsequent
+ // prototype load.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
+ __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
+ __ cmp(r2, ip);
+ call_runtime.Branch(eq);
+  // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ call_runtime.Branch(eq);
+ // For all objects but the receiver, check that the cache is empty.
+ // r4: empty fixed array root.
+ __ cmp(r1, r0);
+ check_prototype.Branch(eq);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ loop.Branch(ne);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
+ frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ // r0: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
__ mov(r2, Operand(r0));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
fixed_array.Branch(ne);
+ use_cache.Bind();
// Get enum cache
+ // r0: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
__ mov(r1, Operand(r0));
__ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
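In pseudocode, the prototype-chain walk emitted above performs roughly the following check (the Obj struct and its fields are illustrative stand-ins, not real V8 API):

    // Stand-in for a JSObject on the prototype chain.
    struct Obj {
      bool has_elements;       // elements != empty fixed array
      bool descriptors_empty;  // map's instance descriptors are empty
      bool has_enum_cache;     // enumeration index field is not a smi
      bool enum_cache_empty;   // enum cache bridge cache is empty
      Obj* prototype;          // 0 once the chain reaches null
    };

    bool CanUseEnumCache(Obj* receiver) {
      for (Obj* o = receiver; o != 0; o = o->prototype) {
        if (o->has_elements) return false;
        if (o->descriptors_empty || !o->has_enum_cache) return false;
        // Only the receiver itself may have a non-empty cache.
        if (o != receiver && !o->enum_cache_empty) return false;
      }
      return true;  // iterate with the receiver's map and its enum cache
    }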
@@ -1863,9 +1908,7 @@
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_reg(r0);
- __ mov(r0, Operand(1));
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@@ -2272,7 +2315,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height);
@@ -2303,20 +2347,19 @@
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
- LoadAndSpill(node->then_expression(), typeof_state());
+ LoadAndSpill(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
- LoadAndSpill(node->else_expression(), typeof_state());
+ LoadAndSpill(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT(frame_->height() == original_height + 1);
@@ -2383,10 +2426,6 @@
frame_->EmitPush(r0);
} else {
- // Note: We would like to keep the assert below, but it fires because of
- // some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(!slot->var()->is_dynamic());
-
// Special handling for locals allocated in registers.
__ ldr(r0, SlotOperand(slot, r2));
frame_->EmitPush(r0);
@@ -2481,7 +2520,7 @@
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlot(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
}
@@ -2500,7 +2539,7 @@
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValueAndSpill(typeof_state());
+ ref.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -2836,7 +2875,7 @@
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2901,7 +2940,7 @@
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
- property.GetValueAndSpill(typeof_state());
+ property.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -3071,7 +3110,7 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
+ ref.GetValueAndSpill(); // receiver
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -3301,7 +3340,79 @@
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
+ Comment(masm_, "[ GenerateFastCharCodeAt");
+
+ LoadAndSpill(args->at(0));
+ LoadAndSpill(args->at(1));
+ frame_->EmitPop(r0); // Index.
+ frame_->EmitPop(r1); // String.
+
+ Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
+
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // The 'string' was a Smi.
+
+ ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+ __ b(ne, &slow); // The index was negative or not a Smi.
+
+ __ bind(&try_again_with_new_string);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &slow);
+
+ // Now r2 has the string type.
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ // Now r3 has the length of the string. Compare with the index.
+ __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
+ __ b(le, &slow);
+
+ // Here we know the index is in range. Check that string is sequential.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ tst(r2, Operand(kStringRepresentationMask));
+  __ b(ne, &not_a_flat_string);
+
+ // Check whether it is an ASCII string.
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ tst(r2, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string. We can add without shifting since the Smi tag size is the
+ // log2 of the number of bytes in a two-byte character.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiShiftSize);
+ __ add(r1, r1, Operand(r0));
+ __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(&ascii_string);
+ __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
+ __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+  __ bind(&not_a_flat_string);
+ __ and_(r2, r2, Operand(kStringRepresentationMask));
+ __ cmp(r2, Operand(kConsStringTag));
+ __ b(ne, &slow);
+
+ // ConsString.
+  // Check that the right hand side is the empty string (i.e. if this is really a
+ // flat string in a cons string). If that is not the case we would rather go
+ // to the runtime system now, to flatten the string.
+ __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
+ __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
+ __ cmp(r2, Operand(r3));
+ __ b(ne, &slow);
+
+ // Get the first of the two strings.
+ __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+ __ bind(&end);
frame_->EmitPush(r0);
}
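The tst against (kSmiTagMask | 0x80000000u) near the top of the fast path above checks two things at once: with kSmiTag == 0, a valid non-negative smi index has both bit 0 (the tag) and bit 31 (the sign) clear. The equivalent standalone check:

    #include <cstdint>

    bool IsNonNegativeSmi(uint32_t tagged_index) {
      const uint32_t kSmiTagMask = 1u;  // assumes a one-bit smi tag of 0
      return (tagged_index & (kSmiTagMask | 0x80000000u)) == 0;
    }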
@@ -3325,6 +3436,51 @@
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r1);
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ true_target()->Branch(eq);
+
+ Register map_reg = r2;
+ __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ false_target()->Branch(eq);
+
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ false_target()->Branch(lt);
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ cc_reg_ = le;
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
+ cc_reg_ = eq;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3403,6 +3559,17 @@
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3476,7 +3643,6 @@
if (op == Token::NOT) {
LoadConditionAndSpill(node->expression(),
- NOT_INSIDE_TYPEOF,
false_target(),
true_target(),
true);
@@ -3490,9 +3656,7 @@
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@@ -3500,9 +3664,7 @@
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@@ -3514,9 +3676,7 @@
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@@ -3566,9 +3726,7 @@
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
continue_label.Jump();
smi_label.Bind();
@@ -3590,9 +3748,7 @@
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
break;
}
@@ -3637,7 +3793,7 @@
ASSERT(frame_->height() == original_height + 1);
return;
}
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
frame_->EmitPop(r0);
JumpTarget slow;
@@ -3677,9 +3833,7 @@
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@@ -3731,7 +3885,6 @@
if (op == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
&is_true,
false_target(),
false);
@@ -3767,7 +3920,6 @@
}
is_true.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3779,7 +3931,6 @@
} else if (op == Token::OR) {
JumpTarget is_false;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
true_target(),
&is_false,
false);
@@ -3815,7 +3966,6 @@
}
is_false.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -4000,28 +4150,35 @@
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
+ true_target()->Branch(eq);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r1, ip);
true_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
+ false_target()->Branch(eq);
+
// It can be an undetectable object.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
@@ -4062,9 +4219,7 @@
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
}
@@ -4114,7 +4269,7 @@
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
@@ -4129,16 +4284,11 @@
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof. If
- // there is a chance that reference errors can be thrown below, we
- // must distinguish between the two kinds of loads (typeof expression
- // loads must not throw a reference error).
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
@@ -4157,9 +4307,6 @@
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
-
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
VirtualFrame* frame = cgen_->frame();
@@ -4495,7 +4642,7 @@
// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
@@ -4568,6 +4715,22 @@
if (cc != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but (undefined <= undefined)
+ // == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(r2));
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
+ }
+ __ mov(pc, Operand(lr)); // Return.
+ }
}
}
__ bind(&return_equal);
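The ODDBALL_TYPE special case above implements ECMAScript 11.8.5: relational comparison against undefined is always false even though undefined == undefined is true. The stub achieves this by returning an integer on the wrong side of zero for each operator, roughly as follows (a sketch; the LESS/GREATER sentinel values of -1/1 are assumed from V8's comparison convention):

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    // For cc == le the caller tests (result <= 0), so returning GREATER
    // makes "undefined <= undefined" false; symmetrically LESS defeats ">=".
    CompareResult UndefinedVsUndefined(bool is_le) {
      return is_le ? GREATER : LESS;
    }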
@@ -4645,9 +4808,17 @@
// Rhs is a smi, lhs is a number.
__ push(lr);
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ } else {
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
// r3 and r2 are rhs as double.
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -4675,9 +4846,16 @@
__ push(lr);
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ } else {
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
__ pop(lr);
// Fall through to both_loaded_as_doubles.
}
@@ -4880,9 +5058,23 @@
// fall through if neither is a NaN. Also binds rhs_not_nan.
EmitNanCheck(masm, &rhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ __ fcmp(d6, d7);
+ __ vmrs(pc);
+ __ mov(r0, Operand(0), LeaveCC, eq);
+ __ mov(r0, Operand(1), LeaveCC, lt);
+ __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+ } else {
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
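A note on the __ vmrs(pc) call above: passing pc (register code 15) selects the APSR_nzcv form of vmrs, which copies the VFP comparison flags from the FPSCR into the CPSR so the conditional movs that follow see the fcmp result. A worked encoding check (a sketch, independent of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t al = 0xEu << 28;
      // cond | 0xE*B24 | 0xF*B20 | B16 | dst*B12 | 0xA*B8 | B4, dst = 15
      uint32_t instr = al | 0xEu << 24 | 0xFu << 20 | 1u << 16 |
                       15u << 12 | 0xAu << 8 | 1u << 4;
      assert(instr == 0xEEF1FA10u);  // vmrs APSR_nzcv, fpscr
      return 0;
    }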
  __ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -4935,7 +5127,6 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@@ -4982,24 +5173,74 @@
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ jmp(&do_the_call); // Tail call. No return.
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
+
+  // Push the arguments onto the stack.
__ push(r1);
__ push(r0);
- __ mov(r0, Operand(1)); // Set number of arguments.
+
+ if (Token::ADD == operation) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[1] : first argument
+
+ Label not_strings, not_string1, string1;
+ __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &not_string1);
+  __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &not_string1);
+
+  // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
+
+ // First and second argument are strings.
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+ // First argument was not a string, test second.
+  __ bind(&not_string1);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &not_strings);
+  __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &not_strings);
+
+  // Only second argument is a string.
+  __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+  __ bind(&not_strings);
+ }
+
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
@@ -5027,12 +5268,20 @@
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
@@ -5052,12 +5301,19 @@
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
@@ -5066,6 +5322,38 @@
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// r5: Address of heap number for result.
+
+ if (CpuFeatures::IsSupported(VFP3) &&
+ ((Token::MUL == operation) ||
+ (Token::DIV == operation) ||
+ (Token::ADD == operation) ||
+ (Token::SUB == operation))) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ if (Token::MUL == operation) {
+ __ fmuld(d5, d6, d7);
+ } else if (Token::DIV == operation) {
+ __ fdivd(d5, d6, d7);
+ } else if (Token::ADD == operation) {
+ __ faddd(d5, d6, d7);
+ } else if (Token::SUB == operation) {
+ __ fsubd(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+
+ __ fmrrd(r0, r1, d5);
+
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
+ __ mov(pc, lr);
+ return;
+ }
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
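For reference, fmdrr and fmrrd move the two 32-bit halves of an IEEE-754 double between a core-register pair and a VFP D register, which is why the result above can be stored into the HeapNumber with two plain str instructions. A host-side sketch of that bit layout (assuming little-endian word order, low word first):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    double DoubleFromWords(uint32_t lo, uint32_t hi) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);  // reinterpret the 64 bits as a double
      return d;
    }

    int main() {
      // 1.0: sign 0, biased exponent 0x3FF, zero mantissa.
      printf("%f\n", DoubleFromWords(0x00000000u, 0x3FF00000u));
      return 0;
    }
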
@@ -5134,38 +5422,49 @@
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
-
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ __ rsb(dest, dest, Operand(30));
+ }
__ bind(&right_exponent);
- // Get the top bits of the mantissa.
- __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to take.
- // We just orred in the implicit bit so that took care of one and we want to
- // leave the sign bit 0 so we subtract 2 bits from the shift distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't actually
- // need this because the bits get shifted out again, but it's probably slower
- // to test than just to do it.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- __ mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions implementing double precision to integer
+ // conversion using round to zero.
+ __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ __ fmdrr(d7, scratch2, scratch);
+ __ ftosid(s15, d7);
+ __ fmrs(dest, s15);
+ } else {
+ // Get the top bits of the mantissa.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the last 10 bits.
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ }
__ bind(&done);
}
-
// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
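The fallback branch of GetInt32 above performs, with ARM shifts and masks, the textbook truncation that the following sketch does with explicit bit operations: reattach the mantissa's implicit leading 1, shift by the unbiased exponent, and negate if the sign bit was set. Overflow handling is omitted here because the surrounding code has already range-checked the exponent:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Truncate-toward-zero conversion for |d| < 2^31, mirroring the non-VFP
    // path; the VFP3 branch gets the same result from a single ftosid.
    int32_t TruncateDoubleToInt32(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bool negative = (bits >> 63) != 0;
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;  // |d| < 1
      uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
      int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
      return negative ? -magnitude : magnitude;
    }

    int main() {
      printf("%d %d\n", TruncateDoubleToInt32(30.9),
             TruncateDoubleToInt32(-2.5));  // prints 30 -2
      return 0;
    }
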
@@ -5182,7 +5481,7 @@
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r1, r3, r4, r5, &slow);
+ GetInt32(masm, r1, r3, r5, r4, &slow);
__ jmp(&done_checking_r1);
__ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1));
@@ -5192,7 +5491,7 @@
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r0, r2, r4, r5, &slow);
+ GetInt32(masm, r0, r2, r5, r4, &slow);
__ jmp(&done_checking_r0);
__ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1));
@@ -5277,7 +5576,6 @@
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5659,7 +5957,6 @@
// Enter runtime system.
__ bind(&slow);
__ push(r0);
- __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(¬_smi);
@@ -5797,7 +6094,7 @@
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -5857,7 +6154,7 @@
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
// check if we should retry or throw exception
Label retry;
@@ -5903,12 +6200,12 @@
// this by performing a garbage collection and retrying the
// builtin once.
- StackFrame::Type frame_type = is_debug_break
- ? StackFrame::EXIT_DEBUG
- : StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -5923,7 +6220,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -5932,7 +6229,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -5943,7 +6240,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -6135,7 +6432,6 @@
// Slow-case. Tail call builtin.
__ bind(&slow);
- __ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
@@ -6178,7 +6474,7 @@
__ b(eq, &adaptor);
// Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
+ // through register r0. Use unsigned comparison to get negative
// check for free.
__ cmp(r1, r0);
__ b(cs, &slow);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index e079950..ba7f936 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -77,12 +77,12 @@
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
- inline void GetValueAndSpill(TypeofState typeof_state);
+ inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -112,10 +112,8 @@
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state has its own typeof state and pair of branch
- // labels.
+ // state. The new state has its own pair of branch labels.
CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target);
@@ -123,13 +121,11 @@
// previous state.
~CodeGenState();
- TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
CodeGenerator* owner_;
- TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
CodeGenState* previous_;
@@ -169,8 +165,8 @@
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -191,10 +187,6 @@
static const int kUnknownIntValue = -1;
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 4;
-
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -210,7 +202,6 @@
// State
bool has_cc() const { return cc_reg_ != al; }
- TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
@@ -259,25 +250,22 @@
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ inline void LoadAndSpill(Expression* expression);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
inline void LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control);
@@ -331,7 +319,6 @@
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -347,6 +334,8 @@
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -377,6 +366,9 @@
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -391,6 +383,7 @@
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -433,6 +426,27 @@
};
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) {}
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#if defined(DEBUG)
+ void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
+#endif // defined(DEBUG)
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 964bfe1..89ff7c0 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -67,6 +67,26 @@
}
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* VFPRegisters::names_[kNumVFPRegisters] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+};
+
+
+const char* VFPRegisters::Name(int reg) {
+ ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg];
+}
+
+
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
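Because the S and D banks share one table, the lookup is pure index arithmetic: indices 0..31 are the single-precision names and 32..47 the double-precision ones, which is why the disassembler prints a D register via Name(reg + 32). A self-contained mirror of the table (hypothetical helper name):

    #include <cassert>
    #include <cstdio>

    static const char* kVFPNames[48] = {
      "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",
      "s8",  "s9",  "s10", "s11", "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
      "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",
      "d8",  "d9",  "d10", "d11", "d12", "d13", "d14", "d15"
    };

    const char* VFPName(int reg) {
      assert(reg >= 0 && reg < 48);
      return kVFPNames[reg];
    }

    int main() {
      printf("%s %s\n", VFPName(15), VFPName(7 + 32));  // prints s15 d7
      return 0;
    }
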
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 6bd0d00..9432207 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -43,24 +43,27 @@
# define USE_THUMB_INTERWORK 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_7A__) || \
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7__)
+#if defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || \
+ defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6ZK__) || \
+ defined(__ARM_ARCH_6T2__) || \
+ defined(CAN_USE_ARMV7_INSTRUCTIONS)
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(CAN_USE_ARMV6_INSTRUCTIONS)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
// Simulator should support ARM5 instructions.
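The reordered preprocessor chain above makes each newer architecture imply the older ones: the ARMv7 test feeds the ARMv6 test, which in turn feeds the ARMv5/Thumb test. A standalone probe that mirrors the chain (macro names prefixed SKETCH_ to avoid clashing with the real ones) shows the cascade when built with different -march settings:

    #include <cstdio>

    #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \
        defined(__ARM_ARCH_7__)
    # define SKETCH_ARMV7 1
    #endif
    #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
        defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
        defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \
        defined(SKETCH_ARMV7)
    # define SKETCH_ARMV6 1
    #endif
    #if defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) || \
        defined(SKETCH_ARMV6)
    # define SKETCH_ARMV5 1
    #endif

    int main() {
    #if defined(SKETCH_ARMV7)
      puts("ARMv7 (implies v6 and v5)");
    #elif defined(SKETCH_ARMV6)
      puts("ARMv6 (implies v5)");
    #elif defined(SKETCH_ARMV5)
      puts("ARMv5");
    #else
      puts("pre-ARMv5 (or non-ARM host)");
    #endif
      return 0;
    }
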
@@ -75,6 +78,9 @@
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
+// VFP support.
+static const int kNumVFPRegisters = 48;
+
// PC is register 15.
static const int kPCRegister = 15;
static const int kNoRegister = -1;
@@ -231,6 +237,16 @@
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
+ inline int VnField() const { return Bits(19, 16); }
+ inline int VmField() const { return Bits(3, 0); }
+ inline int VdField() const { return Bits(15, 12); }
+ inline int NField() const { return Bit(7); }
+ inline int MField() const { return Bit(5); }
+ inline int DField() const { return Bit(22); }
+ inline int RtField() const { return Bits(15, 12); }
+
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
return static_cast<Opcode>(Bits(24, 21));
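The new Vn/Vd/Vm accessors are thin wrappers over the same Bits/Bit helpers the existing fields use. As a concrete check of the documented positions: vadd.f64 d5, d6, d7 with an AL condition encodes as 0xEE365B07, and extracting the stated ranges recovers the operands. A standalone sketch:

    #include <cstdint>
    #include <cstdio>

    // Pick out the bit range [hi:lo] of a 32-bit ARM instruction word.
    int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main() {
      uint32_t vadd = 0xEE365B07;  // vadd.f64 d5, d6, d7 (cond = AL)
      printf("Vn=%d Vd=%d Vm=%d\n",
             Bits(vadd, 19, 16),   // 6
             Bits(vadd, 15, 12),   // 5
             Bits(vadd, 3, 0));    // 7
      return 0;
    }
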
@@ -307,7 +323,7 @@
struct RegisterAlias {
int reg;
- const char *name;
+ const char* name;
};
private:
@@ -315,6 +331,15 @@
static const RegisterAlias aliases_[];
};
+// Helper functions for converting between VFP register numbers and names.
+class VFPRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ private:
+ static const char* names_[kNumVFPRegisters];
+};
} } // namespace assembler::arm
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index cafefce..a5a358b 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -33,12 +33,13 @@
#include "v8.h"
#include "cpu.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
void CPU::Setup() {
- // Nothing to do.
+ CpuFeatures::Probe();
}
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index ef33653..fc9808d 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -61,7 +61,7 @@
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- CodeGenerator::kJSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 6431483..2f9e78f 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -97,6 +97,10 @@
// Printing of common values.
void PrintRegister(int reg);
+ void PrintSRegister(int reg);
+ void PrintDRegister(int reg);
+ int FormatVFPRegister(Instr* instr, const char* format);
+ int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
@@ -121,6 +125,10 @@
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
+ // For VFP support.
+ void DecodeTypeVFP(Instr* instr);
+ void DecodeType6CoprocessorIns(Instr* instr);
+
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
@@ -171,6 +179,16 @@
Print(converter_.NameOfCPURegister(reg));
}
+// Print the VFP S register name according to the active name converter.
+void Decoder::PrintSRegister(int reg) {
+ Print(assembler::arm::VFPRegisters::Name(reg));
+}
+
+// Print the VFP D register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) {
+ Print(assembler::arm::VFPRegisters::Name(reg + 32));
+}
+
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
@@ -290,6 +308,10 @@
int reg = instr->RmField();
PrintRegister(reg);
return 2;
+ } else if (format[1] == 't') { // 'rt: Rt register
+ int reg = instr->RtField();
+ PrintRegister(reg);
+ return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
@@ -315,6 +337,39 @@
}
+// Handle all VFP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
+ ASSERT((format[0] == 'S') || (format[0] == 'D'));
+
+ if (format[1] == 'n') {
+ int reg = instr->VnField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == 'm') {
+ int reg = instr->VmField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') {
+ int reg = instr->VdField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+
+int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+ Print(format);
+ return 0;
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
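One subtlety in FormatVFPRegister above: a 4-bit Vn/Vd/Vm field can only name 16 registers, so for the 32 single-precision registers the encoding supplies one extra bit (N, M or D) and the register number is (V << 1) | extra, exactly as the PrintSRegister calls compute. In isolation:

    #include <cstdio>

    // 4-bit field plus one extra bit gives 5 bits: s0..s31.
    int SRegisterNumber(int v_field, int extra_bit) {
      return (v_field << 1) | extra_bit;
    }

    int main() {
      printf("s%d s%d\n", SRegisterNumber(0x7, 1),
             SRegisterNumber(0xF, 1));  // prints s15 s31
      return 0;
    }
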
@@ -459,6 +514,13 @@
}
return 1;
}
+ case 'v': {
+ return FormatVFPinstruction(instr, format);
+ }
+ case 'S':
+ case 'D': {
+ return FormatVFPRegister(instr, format);
+ }
case 'w': { // 'w: W field of load and store instructions
if (instr->HasW()) {
Print("!");
@@ -761,8 +823,7 @@
void Decoder::DecodeType6(Instr* instr) {
- // Coprocessor instructions currently not supported.
- Unknown(instr);
+ DecodeType6CoprocessorIns(instr);
}
@@ -770,12 +831,10 @@
if (instr->Bit(24) == 1) {
Format(instr, "swi'cond 'swi");
} else {
- // Coprocessor instructions currently not supported.
- Unknown(instr);
+ DecodeTypeVFP(instr);
}
}
-
void Decoder::DecodeUnconditional(Instr* instr) {
if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
Format(instr, "'memop'h'pu 'rd, ");
@@ -837,6 +896,136 @@
}
+// void Decoder::DecodeTypeVFP(Instr* instr)
+// Implements the following VFP instructions:
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
+// vcmp(Dd, Dm)
+// VMRS
+void Decoder::DecodeTypeVFP(Instr* instr) {
+ ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
+
+ if (instr->Bit(23) == 1) {
+ if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x5) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+ } else if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(7) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+ } else if ((instr->Bit(21) == 0x0) &&
+ (instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bits(21, 20) == 0x3) &&
+ (instr->Bits(19, 16) == 0x4) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0x1) &&
+ (instr->Bit(4) == 0x0)) {
+ Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ } else if ((instr->Bits(23, 20) == 0xF) &&
+ (instr->Bits(19, 16) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(7, 5) == 0x0) &&
+ (instr->Bit(4) == 0x1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ if (instr->Bits(15, 12) == 0xF)
+ Format(instr, "vmrs'cond APSR, FPSCR");
+ else
+ Unknown(instr); // Not used by V8.
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ Format(instr, "vmov'cond 'Sn, 'rt");
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ Format(instr, "vmov'cond 'rt, 'Sn");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ }
+}
+
+
+// Decode Type 6 coprocessor instructions.
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
+void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
+ ASSERT((instr->TypeField() == 6));
+
+ if (instr->Bit(23) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ } else if (instr->Bit(20) == 1) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 97feae5..45cab55 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -28,6 +28,8 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
#include "fast-codegen.h"
#include "parser.h"
@@ -52,38 +54,94 @@
// frames-arm.h for its layout.
void FastCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
- // ARM does NOT call SetFunctionPosition.
+ SetFunctionPosition(fun);
+ int locals_count = fun->scope()->num_stack_slots();
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
// Adjust fp to point to caller's fp.
__ add(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- if (FLAG_check_stack) {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
for (int i = 0; i < locals_count; i++) {
__ push(ip);
}
}
- if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The kInstrSize is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- Comment cmnt(masm_, "[ Stack check");
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context
+ __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
+ }
+
+ // Check the stack for overflow or break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is
+ // added to the implicit 8 byte offset that always applies to operations
+ // with pc and gives a return address 12 bytes down.
+ { Comment cmnt(masm_, "[ Stack check");
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -95,14 +153,26 @@
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- SetReturnPosition(fun);
+ }
+ EmitReturnSequence(function_->end_position());
+}
+
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
@@ -110,12 +180,332 @@
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int num_parameters = function_->scope()->num_parameters();
+ int32_t sp_delta = (num_parameters + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- int num_parameters = function_->scope()->num_parameters();
- __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+ __ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+ // can be encoded in the instruction and which immediate values require
+ // use of an additional instruction for moving the immediate to a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+ }
+}
+
+
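The possible extra instruction counted above comes from ARM addressing mode 1: a data-processing immediate must be an 8-bit value rotated right by an even amount, so some sp deltas cannot be encoded in the add and force a mov into a scratch register first. A sketch of the fits test (the real check is Assembler::ImmediateFitsAddrMode1Instruction):

    #include <cstdint>
    #include <cstdio>

    // An operand fits if rotating it left by some even amount leaves a value
    // within 8 bits, i.e. it equals some imm8 rotated right by that amount.
    bool FitsAddrMode1Immediate(uint32_t imm) {
      for (int rot = 0; rot < 32; rot += 2) {
        uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
        if (v <= 0xFF) return true;
      }
      return false;
    }

    int main() {
      printf("%d %d\n",
             FitsAddrMode1Immediate(1020),   // 0xFF << 2: fits, prints 1
             FitsAddrMode1Immediate(257));   // 0x101: needs a mov, prints 0
      return 0;
    }
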
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ }
+ }
+}
+
+
+template <>
+MemOperand FastCodeGenerator::CreateSlotOperand<MemOperand>(
+ Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return MemOperand(r0, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ // Use dst as scratch.
+ MemOperand location = CreateSlotOperand<MemOperand>(source, dst);
+ __ ldr(dst, location);
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ mov(ip, Operand(expr->handle()));
+ Move(context, ip);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ str(src, MemOperand(fp, SlotOffset(dst)));
+ break;
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ int index = Context::SlotOffset(dst->index());
+ __ mov(scratch2, Operand(index));
+ __ str(src, MemOperand(scratch1, index));
+ __ RecordWrite(scratch1, scratch2, src);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int drop_count) {
+ ASSERT(drop_count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ add(sp, sp, Operand(drop_count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(sp));
+ __ add(sp, sp, Operand(drop_count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(source);
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, true_label);
+ __ jmp(false_label);
+}
+
+
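TestAndBranch above calls into the runtime for the general case; the JS ToBoolean rules it relies on are small enough to model host-side: undefined, null, false, plus and minus zero, NaN and the empty string are falsy, everything else is truthy. A model of those rules, not V8's implementation:

    #include <cmath>
    #include <cstdio>
    #include <string>

    bool ToBoolean(double number) {  // numbers: false for +/-0 and NaN
      return number != 0.0 && !std::isnan(number);
    }

    bool ToBoolean(const std::string& s) {  // strings: false only when empty
      return !s.empty();
    }

    int main() {
      printf("%d %d %d\n", ToBoolean(0.0), ToBoolean(std::nan("")),
             ToBoolean(std::string("x")));  // prints 0 0 1
      return 0;
    }
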
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(ip);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ ldr(r1,
+ CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ cmp(r1, cp);
+ __ Check(eq, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(r0);
+ __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ RecordWrite(cp, r2, r0);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ Visit(decl->fun()); // Initial value for function decl.
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(r0);
+ } else {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Value in r0 is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ }
}
}
@@ -131,50 +521,18 @@
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(r0);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ // Complete the statement based on the type of the subexpression.
+ if (expr->AsLiteral() != NULL) {
__ mov(r0, Operand(expr->AsLiteral()->handle()));
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(r0);
}
-
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- __ RecordJSReturn();
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int num_parameters = function_->scope()->num_parameters();
- __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
- __ Jump(lr);
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -182,7 +540,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -191,12 +550,7 @@
__ mov(r0, Operand(boilerplate));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(r0);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), r0);
}
@@ -204,6 +558,7 @@
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
@@ -212,30 +567,69 @@
__ mov(r2, Operand(expr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(expr->location().is_nowhere());
- __ pop();
- }
-
- } else {
- Comment cmnt(masm_, "Stack slot");
+ DropAndMove(expr->context(), r0);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
- __ push(ip);
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, r0);
+ } else {
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Currently the only parameter expressions that can occur are
+ // of the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(r2, object_slot);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ mov(r1, Operand(key_literal->handle()));
+
+ // Push both as arguments to ic.
+ __ stm(db_w, sp, r2.bit() | r1.bit());
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), r0, 2);
}
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// r4 = JS function, literals array
@@ -257,10 +651,132 @@
__ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(r0);
+ Move(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // r2 = literal array (0).
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r2, literal_offset));
+ // Check whether we need to materialize the object literal boilerplate.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(ip));
+ __ b(ne, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // r1 = literal index (1).
+ __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+ // r0 = constant properties (2).
+ __ mov(r0, Operand(expr->constant_properties()));
+ __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // r0 contains boilerplate.
+ // Clone boilerplate.
+ __ push(r0);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: The result is saved on top of the
+ // stack and in r0.
+ // If result_saved == false: The result is not on the stack, just in r0.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(r0);
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0.
+ break;
+ }
+ // Fall through.
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(r0);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0.
+ break;
+
+ case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::SETTER:
+ __ push(r0);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(r1);
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0
+ break;
+ }
+ }
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ pop();
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(r0);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
}
}
@@ -314,7 +830,7 @@
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(r0); // Subexpression value.
@@ -329,211 +845,864 @@
__ RecordWrite(r1, r2, r0);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ pop();
- } else if (destination.is_temporary() && !result_saved) {
- __ push(r0);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ pop();
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(r0);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in r0, variable name in r2, and the global object on
- // the stack.
- if (source.is_temporary()) {
- __ pop(r0);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ mov(r0, Operand(rhs->AsLiteral()->handle()));
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in r0, variable name in
+ // r2, and the global object on the stack.
+ __ pop(r0);
__ mov(r2, Operand(var->name()));
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(destination.is_nowhere());
- __ pop();
- }
+ DropAndMove(expr->context(), r0);
- } else {
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
- // temporary on the stack.
- __ ldr(ip, MemOperand(sp));
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(ip);
+ } else if (var->slot()) {
+ Slot* slot = var->slot();
+ ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ mov(ip, Operand(rhs->AsLiteral()->handle()));
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(ip);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ ldr(r0,
+ CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ mov(r0, cp);
+ }
+ // The context may be an intermediate context, not a function context.
+ __ ldr(r0, CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX));
+ __ pop(r1);
+ __ str(r1, CodeGenerator::ContextOperand(r0, slot->index()));
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(r1);
+ } else if (expr->context() != Expression::kEffect) {
+ __ mov(r3, r1);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+
+ // Update the write barrier for the array store with r0 as the scratch
+ // register. Skip the write barrier if the value written (r1) is a smi.
+ // The smi test is part of RecordWrite on other platforms, not on arm.
+ Label exit;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r0, r2, r1);
+ __ bind(&exit);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), r3);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(r0);
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ DropAndMove(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
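+ // The keyed store IC expects the value in r0 and the key and receiver on
+ // the stack.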
+ __ pop(r0);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ // Receiver and key are still on the stack.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ Move(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
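+ // Used below to check whether a symbol key is actually an array index.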
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in r2 and the receiver on the stack.
+ __ mov(r2, Operand(key->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Drop key and receiver left on the stack by IC.
+ __ pop();
+ }
+ DropAndMove(expr->context(), r0);
+}
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
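+ // Callers have already pushed the function name and the receiver.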
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, reloc_info);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+}
+
+
void FastCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
- __ mov(r1, Operand(var->name()));
- // Push global object as receiver.
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ mov(r1, Operand(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ mov(r0, Operand(key->handle()));
+ __ push(r0);
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Load receiver object into r1.
+ if (prop->is_synthetic()) {
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ } else {
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ }
+ // Overwrite (object, key) with (function, receiver).
+ __ str(r0, MemOperand(sp, kPointerSize));
+ __ str(r1, MemOperand(sp));
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ push(r1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ // Push global object (receiver).
__ ldr(r0, CodeGenerator::GlobalObject());
- __ stm(db_w, sp, r1.bit() | r0.bit());
+ __ push(r0);
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
- __ push(r0);
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ // Each argument is in value context, so it is already on the stack;
+ // nothing more to do here.
}
- // Record source position for debugger
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- if (expr->location().is_temporary()) {
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(expr->location().is_nowhere());
- __ pop();
- }
+
+ // Load the function into r1 and the argument count into r0.
+ __ mov(r0, Operand(arg_count));
+ // The function is on the stack at sp[(arg_count + 1) * kPointerSize].
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in r0, or pop it.
+ DropAndMove(expr->context(), r0);
}
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ mov(r1, Operand(expr->name()));
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
- __ push(r0);
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(r0);
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
} else {
- ASSERT(expr->location().is_nowhere());
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), r0);
+ }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
+ break;
+ case Expression::kTestValue:
+ // The value (undefined) is false, so it is needed on the stack.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
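+ // NOT is compiled by visiting the subexpression in a test context with
+ // the true and false labels swapped, so no explicit negation is emitted.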
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ str(r0, MemOperand(sp));
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ mov(r0, Operand(proxy->name()));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), r0);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
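+ // TO_NUMBER leaves the numeric value in r0.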
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(r0);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(r0);
+ __ mov(ip, Operand(Smi::FromInt(1)));
+ __ push(ip);
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ mov(r2, Operand(proxy->AsVariable()->name()));
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Restore the stack after the store IC.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+ // Do nothing. The result is either on the stack for a value context
+ // or discarded for an effect context.
+ break;
+ case Expression::kTest:
+ __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ b(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ b(true_label_);
+ break;
+ }
}
}
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
- Label done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
+ Visit(expr->left());
+ Visit(expr->right());
+ __ pop(r0);
+ __ pop(r1);
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE);
+ __ CallStub(&stub);
+ Move(expr->context(), r0);
- Visit(left);
- // Call the runtime to find the boolean value of the left-hand
- // subexpression. Duplicate the value if it may be needed as the final
- // result.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
+ break;
}
- } else {
- ASSERT(left->AsLiteral() != NULL);
- __ mov(r0, Operand(left->AsLiteral()->handle()));
- __ push(r0);
- if (destination.is_temporary()) __ push(r0);
+ default:
+ UNREACHABLE();
}
- // The left-hand value is in on top of the stack. It is duplicated on the
- // stack iff the destination location is temporary.
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &done);
-
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) __ pop();
- Visit(right);
-
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ mov(ip, Operand(right->AsLiteral()->handle()));
- __ push(ip);
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ pop();
- }
-
- __ bind(&done);
}
+
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
+ }
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_JS);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(eq, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = eq;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = eq;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = lt;
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(cc, true_label_);
+ __ jmp(false_label_);
+ }
+ }
+
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
+}
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), r0);
+}
+
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 6fde4b7..b0fa13a 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -54,23 +54,24 @@
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
Address sp = fp + ExitFrameConstants::kSPDisplacement;
- Type type;
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- type = EXIT_DEBUG;
+ const int offset = ExitFrameConstants::kCodeOffset;
+ Object* code = Memory::Object_at(fp + offset);
+ bool is_debug_exit = code->IsSmi();
+ if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
- } else {
- type = EXIT;
}
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- return type;
+ return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Do nothing
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 0874c09..4924c1a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -100,7 +100,7 @@
static const int kSPDisplacement = -1 * kPointerSize;
// The debug marker is just above the frame pointer.
- static const int kDebugMarkOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index ba83645..c56f414 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -107,12 +107,17 @@
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
- __ mov(t1, Operand(t1, LSR, String::kHashShift));
+ __ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
if (i > 0) {
- __ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i)));
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is
+ // right shifted as part of the following 'and' instruction.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ add(t1, t1, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(t1, t1, Operand(r3));
+ __ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 45c6540..aa6570c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -155,6 +155,15 @@
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ cmp(sp, Operand(ip));
+ b(lo, on_stack_overflow);
+}
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@@ -274,9 +283,7 @@
}
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Compute the argv pointer and keep it in a callee-saved register.
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -298,8 +305,11 @@
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // setup new frame pointer
- // Push debug marker.
- mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ mov(ip, Operand(Smi::FromInt(0)));
+ } else {
+ mov(ip, Operand(CodeObject()));
+ }
push(ip);
// Save the frame pointer and the context in top.
@@ -316,7 +326,7 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// Use sp as base to push.
CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
}
@@ -348,14 +358,14 @@
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// This code intentionally clobbers r2 and r3.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
add(r3, fp, Operand(kOffset));
CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
}
@@ -784,15 +794,13 @@
mov(scratch1, Operand(new_space_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
ldr(result, MemOperand(scratch1));
- } else {
-#ifdef DEBUG
+ } else if (FLAG_debug_code) {
// Assert that result actually contains top on entry. scratch2 is used
// immediately below so this use of scratch2 does not cause difference with
// respect to register content between debug and release mode.
ldr(scratch2, MemOperand(scratch1));
cmp(result, scratch2);
Check(eq, "Unexpected allocation top");
-#endif
}
// Calculate new top and bail out if new space is exhausted. Use result
@@ -805,7 +813,11 @@
cmp(result, Operand(scratch2));
b(hi, gc_required);
- // Update allocation top. result temporarily holds the new top,
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ tst(result, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space");
+ }
str(result, MemOperand(scratch1));
// Tag and adjust back to start of new object.
@@ -834,15 +846,13 @@
mov(scratch1, Operand(new_space_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
ldr(result, MemOperand(scratch1));
- } else {
-#ifdef DEBUG
+ } else if (FLAG_debug_code) {
// Assert that result actually contains top on entry. scratch2 is used
// immediately below so this use of scratch2 does not cause difference with
// respect to register content between debug and release mode.
ldr(scratch2, MemOperand(scratch1));
cmp(result, scratch2);
Check(eq, "Unexpected allocation top");
-#endif
}
// Calculate new top and bail out if new space is exhausted. Use result
@@ -856,7 +866,11 @@
cmp(result, Operand(scratch2));
b(hi, gc_required);
- // Update allocation top. result temporarily holds the new top,
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ tst(result, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space");
+ }
str(result, MemOperand(scratch1));
// Adjust back to start of new object.
@@ -975,6 +989,17 @@
}
+void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg) {
+ // ARMv7 VFP3 instructions to implement integer to double conversion.
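+ // Note: clobbers r7 and the VFP registers s15 and d7.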
+ mov(r7, Operand(inReg, ASR, kSmiTagSize));
+ fmsr(s15, r7);
+ fsitod(d7, s15);
+ fmrrd(outLowReg, outHighReg, d7);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@@ -1141,6 +1166,9 @@
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
@@ -1150,6 +1178,26 @@
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // Slot is in the current function context.
+ // The context may be an intermediate context, not a function context.
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index e37bb5e..0974329 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -79,6 +79,11 @@
void RecordWrite(Register object, Register offset, Register scratch);
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -87,18 +92,20 @@
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register r0 and
+ // Enter specific kind of exit frame; either normal or debug mode.
+ // Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and the builtin function to call in r5.
- void EnterExitFrame(StackFrame::Type type);
+ void EnterExitFrame(ExitFrame::Mode mode);
// Leave the current exit frame. Expects the return value in r0.
- void LeaveExitFrame(StackFrame::Type type);
+ void LeaveExitFrame(ExitFrame::Mode mode);
// Align the stack by optionally pushing a Smi zero.
void AlignStack(int offset);
+ void LoadContext(Register dst, int context_chain_length);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -240,6 +247,11 @@
// occurred.
void IllegalOperation(int num_arguments);
+ // Uses VFP instructions to Convert a Smi to a double.
+ void IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg);
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 2e75a61..24b6a9c 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -29,6 +29,7 @@
#include "unicode.h"
#include "log.h"
#include "ast.h"
+#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
@@ -587,9 +588,9 @@
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r0, Operand(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
// Handle it if the stack pointer is already below the stack limit.
@@ -1089,9 +1090,9 @@
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r0, Operand(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
SafeCall(&check_preempt_label_, ls);
@@ -1099,14 +1100,12 @@
void RegExpMacroAssemblerARM::CheckStackLimit() {
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
- }
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ mov(r0, Operand(stack_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ cmp(backtrack_stackpointer(), Operand(r0));
+ SafeCall(&stack_overflow_label_, ls);
}
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 0711ac1..f70bc05 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -260,6 +260,21 @@
};
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
#endif // V8_NATIVE_REGEXP
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 22bec82..9dc417b 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -342,6 +342,11 @@
PrintF("Z flag: %d; ", sim_->z_flag_);
PrintF("C flag: %d; ", sim_->c_flag_);
PrintF("V flag: %d\n", sim_->v_flag_);
+ PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
+ PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
+ PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
+ PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
+ PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "unstop") == 0) {
intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
@@ -429,6 +434,24 @@
c_flag_ = false;
v_flag_ = false;
+ // Initializing VFP registers.
+ // All registers are initialized to zero to start with
+ // even though s_registers_ & d_registers_ share the same
+ // physical registers in the target.
+ for (int i = 0; i < num_s_registers; i++) {
+ vfp_register[i] = 0;
+ }
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+
+ inv_op_vfp_flag_ = false;
+ div_zero_vfp_flag_ = false;
+ overflow_vfp_flag_ = false;
+ underflow_vfp_flag_ = false;
+ inexact_vfp_flag_ = false;
+
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
@@ -545,6 +568,99 @@
}
+// Getting from and setting into VFP registers.
+void Simulator::set_s_register(int sreg, unsigned int value) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ vfp_register[sreg] = value;
+}
+
+
+unsigned int Simulator::get_s_register(int sreg) const {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ return vfp_register[sreg];
+}
+
+
+void Simulator::set_s_register_from_float(int sreg, const float flt) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ // Read the bits from the single precision floating point value
+ // into the unsigned integer element of vfp_register[] given by index=sreg.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &flt, sizeof(vfp_register[0]));
+ memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+}
+
+
+void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ // Read the bits from the integer value into the unsigned integer element of
+ // vfp_register[] given by index=sreg.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &sint, sizeof(vfp_register[0]));
+ memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+}
+
+
+void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
+ ASSERT((dreg >= 0) && (dreg < num_d_registers));
+ // Read the bits from the double precision floating point value into the two
+ // consecutive unsigned integer elements of vfp_register[] given by index
+ // 2*dreg and 2*dreg+1.
+ char buffer[2 * sizeof(vfp_register[0])];
+ memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
+#else
+ memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
+ memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
+#endif
+}
+
+
+float Simulator::get_float_from_s_register(int sreg) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+
+ float sm_val = 0.0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the single precision floating point value and return it.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
+ memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
+ return(sm_val);
+}
+
+
+int Simulator::get_sinteger_from_s_register(int sreg) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+
+ int sm_val = 0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the integer value and return it.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
+ memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
+ return(sm_val);
+}
+
+
+double Simulator::get_double_from_d_register(int dreg) {
+ ASSERT((dreg >= 0) && (dreg < num_d_registers));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(vfp_register[0])];
+#ifdef BIG_ENDIAN_FLOATING_POINT
+ memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
+ memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
+#else
+ memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
+#endif
+ memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
+ return(dm_val);
+}
+
+
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
@@ -772,6 +888,37 @@
}
+// Support for VFP comparisons.
+void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
+ // All non-NaN cases.
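+ // Unordered (NaN) operands fall through to the 'greater than' case below.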
+ if (val1 == val2) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = true;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ } else if (val1 < val2) {
+ n_flag_FPSCR_ = true;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ } else {
+ // Case when (val1 > val2).
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ }
+}
+
+
+void Simulator::Copy_FPSCR_to_APSR() {
+ n_flag_ = n_flag_FPSCR_;
+ z_flag_ = z_flag_FPSCR_;
+ c_flag_ = c_flag_FPSCR_;
+ v_flag_ = v_flag_FPSCR_;
+}
+
+
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
@@ -1154,7 +1301,7 @@
}
}
} else {
- UNIMPLEMENTED(); // not used by V8
+ UNIMPLEMENTED(); // Not used by V8.
}
} else {
// extra load/store instructions
@@ -1664,16 +1811,15 @@
void Simulator::DecodeType6(Instr* instr) {
- UNIMPLEMENTED();
+ DecodeType6CoprocessorIns(instr);
}
void Simulator::DecodeType7(Instr* instr) {
if (instr->Bit(24) == 1) {
- // Format(instr, "swi 'swi");
SoftwareInterrupt(instr);
} else {
- UNIMPLEMENTED();
+ DecodeTypeVFP(instr);
}
}
@@ -1745,6 +1891,177 @@
}
+// void Simulator::DecodeTypeVFP(Instr* instr)
+// The following ARMv7 VFP instructions are currently supported.
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
+// vcmp(Dd, Dm)
+// VMRS
+void Simulator::DecodeTypeVFP(Instr* instr) {
+ ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
+
+ int rt = instr->RtField();
+ int vm = instr->VmField();
+ int vn = instr->VnField();
+ int vd = instr->VdField();
+
+ if (instr->Bit(23) == 1) {
+ if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x5) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
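+ // ftosid: Sd = Dm (convert double to signed integer).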
+ double dm_val = get_double_from_d_register(vm);
+ int32_t int_value = static_cast<int32_t>(dm_val);
+ set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value);
+ } else if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(7) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
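+ // fsitod: Dd = Sm (convert signed integer to double).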
+ int32_t int_value = get_sinteger_from_s_register(((vm<<1) |
+ instr->MField()));
+ double dbl_value = static_cast<double>(int_value);
+ set_d_register_from_double(vd, dbl_value);
+ } else if ((instr->Bit(21) == 0x0) &&
+ (instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
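+ // fdivd: Dd = Dn / Dm.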
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value / dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bits(21, 20) == 0x3) &&
+ (instr->Bits(19, 16) == 0x4) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0x1) &&
+ (instr->Bit(4) == 0x0)) {
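+ // vcmp: compare Dd with Dm and set the FPSCR flags.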
+ double dd_value = get_double_from_d_register(vd);
+ double dm_value = get_double_from_d_register(vm);
+ Compute_FPSCR_Flags(dd_value, dm_value);
+ } else if ((instr->Bits(23, 20) == 0xF) &&
+ (instr->Bits(19, 16) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(7, 5) == 0x0) &&
+ (instr->Bit(4) == 0x1) &&
+ (instr->Bits(3, 0) == 0x0)) {
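+ // vmrs: transfer the FPSCR flags to the APSR (only Rt == 15 supported).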
+ if (instr->Bits(15, 12) == 0xF)
+ Copy_FPSCR_to_APSR();
+ else
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
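+ // faddd: Dd = Dn + Dm.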
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value + dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
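+ // fsubd: Dd = Dn - Dm.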
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value - dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
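+ // fmuld: Dd = Dn * Dm.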
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value * dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ } else {
+ if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
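+ // fmsr: Sn = Rt.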
+ int32_t rs_val = get_register(rt);
+ set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val);
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
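+ // fmrs: Rt = Sn.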
+ int32_t int_value = get_sinteger_from_s_register(((vn<<1) |
+ instr->NField()));
+ set_register(rt, int_value);
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ }
+}
+
+
+// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
+// Decode Type 6 coprocessor instructions.
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
+void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
+ ASSERT((instr->TypeField() == 6));
+
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
+
+ if (instr->Bit(23) == 1) {
+ UNIMPLEMENTED();
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
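+ // fmdrr: Dm = <Rt, Rt2>.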
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
+
+ set_s_register_from_sinteger(2*vm, rs_val);
+ set_s_register_from_sinteger((2*vm+1), rn_val);
+
+ } else if (instr->Bit(20) == 1) {
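+ // fmrrd: <Rt, Rt2> = Dm.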
+ int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+ int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
+
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (instr->Bit(21) == 1) {
+ UNIMPLEMENTED();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
pc_modified_ = false;
@@ -1802,7 +2119,6 @@
}
-//
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
@@ -1924,6 +2240,25 @@
return result;
}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ int current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+
} } // namespace assembler::arm
#endif // !defined(__arm__)
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index ff6bbf4..3a4bb31 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -52,6 +52,12 @@
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
@@ -60,6 +66,10 @@
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+
#else // defined(__arm__)
// When running with the simulator transition into simulated execution at this
@@ -73,6 +83,11 @@
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
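+// In the simulator the try catch address is a slot on the simulator stack
+// (see Simulator::PushAddress), hence the extra indirection above.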
+
+
#include "constants-arm.h"
@@ -82,7 +97,6 @@
class Simulator {
public:
friend class Debugger;
-
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@@ -90,7 +104,15 @@
num_registers,
sp = 13,
lr = 14,
- pc = 15
+ pc = 15,
+ s0 = 0, s1, s2, s3, s4, s5, s6, s7,
+ s8, s9, s10, s11, s12, s13, s14, s15,
+ s16, s17, s18, s19, s20, s21, s22, s23,
+ s24, s25, s26, s27, s28, s29, s30, s31,
+ num_s_registers = 32,
+ d0 = 0, d1, d2, d3, d4, d5, d6, d7,
+ d8, d9, d10, d11, d12, d13, d14, d15,
+ num_d_registers = 16
};
Simulator();
@@ -106,6 +128,16 @@
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
+ // Support for VFP.
+ void set_s_register(int reg, unsigned int value);
+ unsigned int get_s_register(int reg) const;
+ void set_d_register_from_double(int dreg, const double& dbl);
+ double get_double_from_d_register(int dreg);
+ void set_s_register_from_float(int sreg, const float flt);
+ float get_float_from_s_register(int sreg);
+ void set_s_register_from_sinteger(int reg, const int value);
+ int get_sinteger_from_s_register(int reg);
+
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
@@ -124,6 +156,12 @@
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -154,6 +192,10 @@
int32_t right,
bool addition);
+ // Support for VFP.
+ void Compute_FPSCR_Flags(double val1, double val2);
+ void Copy_FPSCR_to_APSR();
+
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out);
@@ -185,6 +227,10 @@
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
+ // Support for VFP.
+ void DecodeTypeVFP(Instr* instr);
+ void DecodeType6CoprocessorIns(Instr* instr);
+
// Executes one instruction.
void InstructionDecode(Instr* instr);
@@ -198,20 +244,34 @@
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
- // architecture state
+ // Architecture state.
int32_t registers_[16];
bool n_flag_;
bool z_flag_;
bool c_flag_;
bool v_flag_;
- // simulator support
+ // VFP architecture state.
+ unsigned int vfp_register[num_s_registers];
+ bool n_flag_FPSCR_;
+ bool z_flag_FPSCR_;
+ bool c_flag_FPSCR_;
+ bool v_flag_FPSCR_;
+
+ // VFP FP exception flags architecture state.
+ bool inv_op_vfp_flag_;
+ bool div_zero_vfp_flag_;
+ bool overflow_vfp_flag_;
+ bool underflow_vfp_flag_;
+ bool inexact_vfp_flag_;
+
+ // Simulator support.
char* stack_;
bool pc_modified_;
int icount_;
static bool initialized_;
- // registered breakpoints
+ // Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;
};
@@ -229,6 +289,15 @@
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit();
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ assembler::arm::Simulator::current()->PopAddress();
+ }
};
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 8282655..efccaf4 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -105,7 +105,7 @@
__ b(eq, &miss);
// Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kLengthOffset));
+ __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
__ eor(scratch, scratch, Operand(flags));
@@ -229,10 +229,7 @@
miss, &check_wrapper);
// Load length directly from the string.
- __ and_(scratch1, scratch1, Operand(kStringSizeMask));
- __ add(scratch1, scratch1, Operand(String::kHashShift));
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ mov(r0, Operand(r0, LSR, scratch1));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 97d164e..132c8ae 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -146,29 +146,27 @@
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
}
- if (FLAG_check_stack) {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
for (int i = 0; i < count; i++) {
__ push(ip);
}
- if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The kInstrSize is added
- // to the implicit 8 byte offset that always applies to operations with pc
- // and gives a return address 12 bytes down.
- masm()->add(lr, pc, Operand(Assembler::kInstrSize));
- masm()->cmp(sp, Operand(r2));
- StackCheckStub stub;
- // Call the stub if lower.
- masm()->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ // Check the stack for overflow or a break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is added
+ // to the implicit 8 byte offset that always applies to operations with pc
+ // and gives a return address 12 bytes down.
+ masm()->add(lr, pc, Operand(Assembler::kInstrSize));
+ masm()->cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm()->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
+
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED();
}
@@ -245,11 +243,8 @@
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- Result* arg_count_register,
int arg_count) {
- ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
- arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
}
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 457478d..d523000 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -305,7 +305,6 @@
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
- Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes