Upgrade to 3.29
Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.
Bug: 17370214
Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/arm/OWNERS b/src/arm/OWNERS
new file mode 100644
index 0000000..906a5ce
--- /dev/null
+++ b/src/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index d5db686..8b5c4b8 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -37,24 +37,61 @@
#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
-#include "arm/assembler-arm.h"
+#include "src/arm/assembler-arm.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/assembler.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+
+
+int Register::NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+}
+
+
+int DwVfpRegister::NumRegisters() {
+ return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
+}
+
+
+int DwVfpRegister::NumReservedRegisters() {
+ return kNumReservedRegisters;
+}
+
+
+int DwVfpRegister::NumAllocatableRegisters() {
+ return NumRegisters() - kNumReservedRegisters;
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
- ASSERT(!reg.is(kDoubleRegZero));
- ASSERT(!reg.is(kScratchDoubleReg));
+ DCHECK(!reg.is(kDoubleRegZero));
+ DCHECK(!reg.is(kScratchDoubleReg));
+ if (reg.code() > kDoubleRegZero.code()) {
+ return reg.code() - kNumReservedRegisters;
+ }
return reg.code();
}
-void RelocInfo::apply(intptr_t delta) {
+DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
+ DCHECK(index >= 0 && index < NumAllocatableRegisters());
+ DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code()) {
+ return from_code(index + kNumReservedRegisters);
+ }
+ return from_code(index);
+}
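+
A note on the mapping above: ToAllocationIndex and FromAllocationIndex skip the two reserved registers (kDoubleRegZero and kScratchDoubleReg) so allocatable indices stay contiguous. A minimal standalone sketch of the same arithmetic, assuming the reserved pair occupies codes 14 and 15 (consistent with the DCHECK that the scratch register immediately follows the zero register):

#include <cassert>

// Hypothetical standalone model of the mapping; the concrete codes (14/15)
// and the reserved count (2) are assumptions taken from the DCHECKs above.
const int kDoubleRegZeroCode = 14;
const int kNumReservedRegisters = 2;

int ToAllocationIndex(int code) {
  // Registers above the reserved pair shift down by the reserved count.
  return (code > kDoubleRegZeroCode) ? code - kNumReservedRegisters : code;
}

int FromAllocationIndex(int index) {
  // Indices at or above the first reserved code map back past the pair.
  return (index >= kDoubleRegZeroCode) ? index + kNumReservedRegisters : index;
}

int main() {
  // d0..d13 map to 0..13; d16..d31 map to 14..29; d14/d15 are never allocated.
  for (int code = 0; code < 32; ++code) {
    if (code == 14 || code == 15) continue;
    assert(FromAllocationIndex(ToAllocationIndex(code)) == code);
  }
  return 0;
}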
+
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -66,16 +103,30 @@
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+ // We return the PC for the ool constant pool since this function is used by
+ // the serializer, which expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return constant_pool_entry_address();
+ }
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ DCHECK(IsInConstantPool());
+ return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
}
@@ -84,10 +135,13 @@
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -96,27 +150,26 @@
Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(Assembler::target_address_address_at(pc_));
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_, host_)));
}
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -125,35 +178,47 @@
}
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+Address RelocInfo::target_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+Cell* RelocInfo::target_cell() {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -162,17 +227,43 @@
}
+static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on ARM.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ +
+ (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ +
+ (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
+ stub->instruction_start();
+}
+
+
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
if (host() != NULL) {
@@ -194,28 +285,29 @@
Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
+void RelocInfo::WipeOut() {
+ DCHECK(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
-#ifdef USE_BLX
// A patched return sequence is:
// ldr ip, [pc, #0]
// blx ip
- return ((current_instr & kLdrPCMask) == kLdrPCPattern)
- && ((next_instr & kBlxRegMask) == kBlxRegPattern);
-#else
- // A patched return sequence is:
- // mov lr, pc
- // ldr pc, [pc, #-4]
- return (current_instr == kMovLrPc)
- && ((next_instr & kLdrPCMask) == kLdrPCPattern);
-#endif
+ return Assembler::IsLdrPcImmediateOffset(current_instr) &&
+ Assembler::IsBlxReg(next_instr);
}
@@ -225,26 +317,25 @@
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@@ -257,19 +348,19 @@
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
@@ -292,7 +383,7 @@
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -329,64 +420,253 @@
}
-Address Assembler::target_address_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Call sequence on V7 or later is:
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // For V6 when the constant pool is unavailable, it is:
+ // mov ip, #... @ call address low 8
+ // orr ip, ip, #... @ call address 2nd 8
+ // orr ip, ip, #... @ call address 3rd 8
+ // orr ip, ip, #... @ call address high 8
+ // blx ip
+ // @ return address
+ // In cases that need frequent patching, the address is in the
+ // constant pool. It could be a small constant pool load:
+ // ldr ip, [pc / pp, #...] @ call address
+ // blx ip
+ // @ return address
+ // Or an extended constant pool load (ARMv7):
+ // movw ip, #...
+ // movt ip, #...
+ // ldr ip, [pc, ip] @ call address
+ // blx ip
+ // @ return address
+ // Or an extended constant pool load (ARMv6):
+ // mov ip, #...
+ // orr ip, ip, #...
+ // orr ip, ip, #...
+ // orr ip, ip, #...
+ // ldr ip, [pc, ip] @ call address
+ // blx ip
+ // @ return address
+ Address candidate = pc - 2 * Assembler::kInstrSize;
+ Instr candidate_instr(Memory::int32_at(candidate));
+ if (IsLdrPcImmediateOffset(candidate_instr) ||
+ IsLdrPpImmediateOffset(candidate_instr)) {
+ return candidate;
+ } else {
+ if (IsLdrPpRegOffset(candidate_instr)) {
+ candidate -= Assembler::kInstrSize;
+ }
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ candidate -= 1 * Assembler::kInstrSize;
+ DCHECK(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
+ } else {
+ candidate -= 3 * Assembler::kInstrSize;
+ DCHECK(
+ IsMovImmed(Memory::int32_at(candidate)) &&
+ IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
+ }
+ return candidate;
}
-
-#ifdef USE_BLX
- // If we have a blx instruction, the instruction before it is
- // what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-#endif
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
}
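
The pattern tests used above reduce to mask-and-compare operations on the 32-bit instruction word. As a hedged illustration, the ldr<cond> rd, [pc, #offset] check follows from the kLdrPCImmedMask and kLdrPCImmedPattern constants defined later in this patch (15*B24 | 7*B20 | 15*B16 and 5*B24 | L | pc*B16, i.e. 0x0F7F0000 and 0x051F0000, the latter also visible in the removed code below):

#include <cstdint>
#include <cassert>

// Standalone sketch of IsLdrPcImmediateOffset; the numeric values follow
// from the kLdrPCImmedMask/kLdrPCImmedPattern definitions in this patch.
const uint32_t kLdrPCImmedMask = 0x0F7F0000;     // 15*B24 | 7*B20 | 15*B16
const uint32_t kLdrPCImmedPattern = 0x051F0000;  // 5*B24 | L | r15(pc)*B16

bool IsLdrPcImmediateOffset(uint32_t instr) {
  // Condition bits [31:28], rd [15:12] and offset_12 [11:0] are don't-cares.
  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
}

int main() {
  // ldr ip, [pc, #0] == 0xE59FC000 (cond=AL, rn=pc, rd=ip, offset=0).
  assert(IsLdrPcImmediateOffset(0xE59FC000));
  // str ip, [pc, #0] (L bit clear) must not match.
  assert(!IsLdrPcImmediateOffset(0xE58FC000));
  return 0;
}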
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_address_address_at(pc));
+Address Assembler::break_address_from_return_address(Address pc) {
+ return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) ||
+ IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
+ // Load from constant pool, small section.
+ return pc + kInstrSize * 2;
+ } else {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 4;
+ } else {
+ // A movw / movt load immediate.
+ return pc + kInstrSize * 3;
+ }
+ } else {
+ DCHECK(IsMovImmed(Memory::int32_at(pc)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 6;
+ } else {
+ // A mov / orr load immediate.
+ return pc + kInstrSize * 5;
+ }
+ }
+ }
}
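
The offsets returned above follow directly from the call-sequence shapes documented in target_address_from_return_address: the return address is the instruction after the final blx. A compact sketch of the same arithmetic (the sequence-kind names and kInstrSize == 4 are assumptions for illustration):

// Hedged model of return_address_from_call_start: the distance from the
// start of the call sequence is (sequence length + 1) instructions.
enum CallSequence {
  kSmallConstantPool,   // ldr ip, [pc/pp, #...]; blx ip
  kMovwMovt,            // movw; movt; blx ip                  (ARMv7)
  kMovwMovtExtendedCp,  // movw; movt; ldr ip, [pc, ip]; blx   (ARMv7)
  kMovOrr,              // mov; orr x3; blx ip                 (ARMv6)
  kMovOrrExtendedCp     // mov; orr x3; ldr; blx               (ARMv6)
};

const int kInstrSize = 4;  // assumed, as elsewhere on ARM

int ReturnAddressOffset(CallSequence seq) {
  switch (seq) {
    case kSmallConstantPool:  return 2 * kInstrSize;
    case kMovwMovt:           return 3 * kInstrSize;
    case kMovwMovtExtendedCp: return 4 * kInstrSize;
    case kMovOrr:             return 5 * kInstrSize;
    case kMovOrrExtendedCp:   return 6 * kInstrSize;
  }
  return 0;  // unreachable
}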
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Address constant_pool_entry, Code* code, Address target) {
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
-void Assembler::set_external_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+bool Assembler::is_constant_pool_load(Address pc) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ return !Assembler::IsMovW(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
+ } else {
+ return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
+ }
}
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_address_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to flush the instruction cache
- // after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction was actually patched by the assignment
- // above; the target address is not part of an instruction, it is patched in
- // the constant pool and is read via a data access; the instruction accessing
- // this address in the constant pool remains unchanged.
+Address Assembler::constant_pool_entry_address(
+ Address pc, ConstantPoolArray* constant_pool) {
+ if (FLAG_enable_ool_constant_pool) {
+ DCHECK(constant_pool != NULL);
+ int cp_offset;
+ if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
+ IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
+ // This is an extended constant pool lookup (ARMv6).
+ Instr mov_instr = instr_at(pc);
+ Instr orr_instr_1 = instr_at(pc + kInstrSize);
+ Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+ Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+ cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+ DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
+ } else if (IsMovW(Memory::int32_at(pc))) {
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
+ IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
+ // This is an extended constant pool lookup (ARMv7).
+ Instruction* movw_instr = Instruction::At(pc);
+ Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+ cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
+ movw_instr->ImmedMovwMovtValue();
+ } else {
+ // This is a small constant pool lookup.
+ DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
+ }
+ return reinterpret_cast<Address>(constant_pool) + cp_offset;
+ } else {
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
+ }
}
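
In the ARMv6 extended-pool path above, the 32-bit offset is reassembled by OR-ing together the four rotated 8-bit immediates carried by the mov/orr sequence. A standalone sketch of that split-and-recombine, with DecodeShiftImm modeled as simply returning the byte chunk each instruction contributed (an assumption about its behavior):

#include <cstdint>
#include <cassert>

int main() {
  uint32_t cp_offset = 0x12345678;
  // Each of mov/orr/orr/orr carries one byte-aligned chunk, encodable as an
  // 8-bit immediate with an even rotation in the ARM operand2 format.
  uint32_t chunk0 = cp_offset & 0x000000FFu;  // mov
  uint32_t chunk1 = cp_offset & 0x0000FF00u;  // orr #1
  uint32_t chunk2 = cp_offset & 0x00FF0000u;  // orr #2
  uint32_t chunk3 = cp_offset & 0xFF000000u;  // orr #3
  // Decoding ORs the chunks back together, as in the DecodeShiftImm calls.
  assert((chunk0 | chunk1 | chunk2 | chunk3) == cp_offset);
  return 0;
}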
+
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ if (is_constant_pool_load(pc)) {
+ // This is a constant pool lookup. Return the value in the constant pool.
+ return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
+ } else if (CpuFeatures::IsSupported(ARMv7)) {
+ // This is an movw / movt immediate load. Return the immediate.
+ DCHECK(IsMovW(Memory::int32_at(pc)) &&
+ IsMovT(Memory::int32_at(pc + kInstrSize)));
+ Instruction* movw_instr = Instruction::At(pc);
+ Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+ return reinterpret_cast<Address>(
+ (movt_instr->ImmedMovwMovtValue() << 16) |
+ movw_instr->ImmedMovwMovtValue());
+ } else {
+ // This is an mov / orr immediate load. Return the immediate.
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ Instr mov_instr = instr_at(pc);
+ Instr orr_instr_1 = instr_at(pc + kInstrSize);
+ Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+ Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+ Address ret = reinterpret_cast<Address>(
+ DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+ DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
+ return ret;
+ }
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ if (is_constant_pool_load(pc)) {
+ // This is a constant pool lookup. Update the entry in the constant pool.
+ Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CpuFeatures::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction is actually patched in the case
+ // of embedded constants of the form:
+ // ldr ip, [pp, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged.
+ } else if (CpuFeatures::IsSupported(ARMv7)) {
+ // This is an movw / movt immediate load. Patch the immediate embedded in
+ // the instructions.
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
+ instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(pc, 2 * kInstrSize);
+ }
+ } else {
+ // This is an mov / orr immediate load. Patch the immediate embedded in
+ // the instructions.
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
+ instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
+ instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
+ instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(pc, 4 * kInstrSize);
+ }
+ }
+}
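+
PatchMovwImmediate above rewrites the imm4:imm12 halves of a movw/movt encoding. The split matches the (removed) EncodeMovwImmediate helper later in this patch: bits [15:12] of the value land in instruction bits [19:16] and bits [11:0] stay in place. A minimal sketch (the patch helper's exact behavior is an assumption modeled here):

#include <cstdint>
#include <cassert>

// movw/movt carry a 16-bit immediate as imm4 (bits 19:16) : imm12 (bits 11:0).
uint32_t EncodeMovwImmediate(uint32_t immediate) {
  assert(immediate < 0x10000);
  return ((immediate & 0xF000u) << 4) | (immediate & 0x0FFFu);
}

// Hypothetical model of PatchMovwImmediate: clear both immediate fields,
// then insert the new value.
uint32_t PatchMovwImmediate(uint32_t instruction, uint32_t immediate) {
  uint32_t cleared = instruction & ~(0xFu << 16) & ~0xFFFu;
  return cleared | EncodeMovwImmediate(immediate);
}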
+
+
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index ec28da4..96f28f9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -32,96 +32,187 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm-inl.h"
-#include "serialize.h"
+#include "src/arm/assembler-arm-inl.h"
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-
// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- answer |= 1u << ARMv7;
-#endif // def CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << ARMv7;
-#endif // def CAN_USE_VFP_INSTRUCTIONS
-
-#ifdef __arm__
- // If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. ARMv7 and hardware floating
- // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
- && !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7;
-#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
- // && !defined(__SOFTFP__)
-#endif // def __arm__
+ if (FLAG_enable_armv7) answer |= 1u << ARMv7;
+#endif // CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP3_INSTRUCTIONS
+ if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
+#endif // CAN_USE_VFP3_INSTRUCTIONS
+#ifdef CAN_USE_VFP32DREGS
+ if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
+#endif // CAN_USE_VFP32DREGS
+#ifdef CAN_USE_NEON
+ if (FLAG_enable_neon) answer |= 1u << NEON;
+#endif // CAN_USE_NEON
+ if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
+ answer |= 1u << UNALIGNED_ACCESSES;
+ }
return answer;
}
-void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
+ cache_line_size_ = 64;
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also alowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
-
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- return;
- }
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
- // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
- if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3 | 1u << ARMv7;
- }
- // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
+ // For the simulator build, use whatever the flags specify.
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
+ if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
+ if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
+ if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
+ if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
}
-#else // def __arm__
- // Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
+ if (FLAG_enable_mls) supported_ |= 1u << MLS;
+ if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
+
+#else // __arm__
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
supported_ |= 1u << VFP3 | 1u << ARMv7;
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
}
- if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
- supported_ |= 1u << ARMv7;
- found_by_runtime_probing_ |= 1u << ARMv7;
+ if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
+ if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
+ if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
+
+ if (cpu.architecture() >= 7) {
+ if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
+ if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
+ // Use movw/movt for QUALCOMM ARMv7 cores.
+ if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
+ supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
}
+
+ // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
+ if (cpu.implementer() == base::CPU::ARM &&
+ (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
+ cpu.part() == base::CPU::ARM_CORTEX_A9)) {
+ cache_line_size_ = 32;
+ }
+
+ if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
#endif
+
+ DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
+}
+
+
+void CpuFeatures::PrintTarget() {
+ const char* arm_arch = NULL;
+ const char* arm_target_type = "";
+ const char* arm_no_probe = "";
+ const char* arm_fpu = "";
+ const char* arm_thumb = "";
+ const char* arm_float_abi = NULL;
+
+#if !defined __arm__
+ arm_target_type = " simulator";
+#endif
+
+#if defined ARM_TEST_NO_FEATURE_PROBE
+ arm_no_probe = " noprobe";
+#endif
+
+#if defined CAN_USE_ARMV7_INSTRUCTIONS
+ arm_arch = "arm v7";
+#else
+ arm_arch = "arm v6";
+#endif
+
+#if defined CAN_USE_NEON
+ arm_fpu = " neon";
+#elif defined CAN_USE_VFP3_INSTRUCTIONS
+# if defined CAN_USE_VFP32DREGS
+ arm_fpu = " vfp3";
+# else
+ arm_fpu = " vfp3-d16";
+# endif
+#else
+ arm_fpu = " vfp2";
+#endif
+
+#ifdef __arm__
+ arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
+#elif USE_EABI_HARDFLOAT
+ arm_float_abi = "hard";
+#else
+ arm_float_abi = "softfp";
+#endif
+
+#if defined __arm__ && (defined __thumb__ || defined __thumb2__)
+ arm_thumb = " thumb";
+#endif
+
+ printf("target%s%s %s%s%s %s\n",
+ arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
+ arm_float_abi);
+}
+
+
+void CpuFeatures::PrintFeatures() {
+ printf(
+ "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
+ "MOVW_MOVT_IMMEDIATE_LOADS=%d",
+ CpuFeatures::IsSupported(ARMv7),
+ CpuFeatures::IsSupported(VFP3),
+ CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(NEON),
+ CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
+ CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
+#ifdef __arm__
+ bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
+ bool eabi_hardfloat = true;
+#else
+ bool eabi_hardfloat = false;
+#endif
+ printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of DwVfpRegister
+
+const char* DwVfpRegister::AllocationIndexToString(int index) {
+ DCHECK(index >= 0 && index < NumAllocatableRegisters());
+ DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
+ return VFPRegisters::Name(index, true);
}
@@ -132,10 +223,16 @@
bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on ARM means that it is a movw/movt instruction, or is an
+ // out of line constant pool entry. These only occur if
+ // FLAG_enable_ool_constant_pool is true.
+ return FLAG_enable_ool_constant_pool;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ return Assembler::is_constant_pool_load(pc_);
}
@@ -148,7 +245,7 @@
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+ CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
@@ -165,31 +262,37 @@
// See assembler-arm-inl.h for inlined constructors
Operand::Operand(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE32;
}
}
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
- ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+ DCHECK(is_uint5(shift_imm));
+
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
+
+ if ((shift_op == ROR) && (shift_imm == 0)) {
+ // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
+ // RRX as ROR #0 (See below).
+ shift_op = LSL;
+ } else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
- ASSERT(shift_imm == 0);
+ DCHECK(shift_imm == 0);
shift_op_ = ROR;
shift_imm_ = 0;
}
@@ -197,7 +300,7 @@
Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
- ASSERT(shift_op != RRX);
+ DCHECK(shift_op != RRX);
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
@@ -212,6 +315,7 @@
am_ = am;
}
+
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
rn_ = rn;
rm_ = rm;
@@ -223,7 +327,7 @@
MemOperand::MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am) {
- ASSERT(is_uint5(shift_imm));
+ DCHECK(is_uint5(shift_imm));
rn_ = rn;
rm_ = rm;
shift_op_ = shift_op;
@@ -232,13 +336,69 @@
}
+NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
+ DCHECK((am == Offset) || (am == PostIndex));
+ rn_ = rn;
+ rm_ = (am == Offset) ? pc : sp;
+ SetAlignment(align);
+}
+
+
+NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
+ rn_ = rn;
+ rm_ = rm;
+ SetAlignment(align);
+}
+
+
+void NeonMemOperand::SetAlignment(int align) {
+ switch (align) {
+ case 0:
+ align_ = 0;
+ break;
+ case 64:
+ align_ = 1;
+ break;
+ case 128:
+ align_ = 2;
+ break;
+ case 256:
+ align_ = 3;
+ break;
+ default:
+ UNREACHABLE();
+ align_ = 0;
+ break;
+ }
+}
+
+
+NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
+ base_ = base;
+ switch (registers_count) {
+ case 1:
+ type_ = nlt_1;
+ break;
+ case 2:
+ type_ = nlt_2;
+ break;
+ case 3:
+ type_ = nlt_3;
+ break;
+ case 4:
+ type_ = nlt_4;
+ break;
+ default:
+ UNREACHABLE();
+ type_ = nlt_1;
+ break;
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-// add(sp, sp, 4) instruction (aka Pop())
-const Instr kPopInstruction =
- al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
- kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
@@ -247,11 +407,21 @@
// register r is not encoded.
const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// ldr rd, [pp, #offset]
+const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+// ldr rd, [pp, rn]
+const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
+// vldr dd, [pc, #offset]
+const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+// vldr dd, [pp, #offset]
+const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -263,9 +433,13 @@
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
-const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
+const Instr kMovImmedMask = 0x7f * B21;
+const Instr kMovImmedPattern = 0x1d * B21;
+const Instr kOrrImmedMask = 0x7f * B21;
+const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
@@ -282,86 +456,50 @@
const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
-const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-const Instr kLdrStrOffsetMask = 0x00000fff;
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_pending_reloc_info_ = 0;
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(),
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ num_pending_32_bit_reloc_info_ = 0;
+ num_pending_64_bit_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
+ first_const_pool_32_use_ = -1;
+ first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
+ constant_pool_available_ = !FLAG_enable_ool_constant_pool;
ClearRecordedAstId();
}
Assembler::~Assembler() {
- ASSERT(const_pool_blocked_nesting_ == 0);
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
+ DCHECK(const_pool_blocked_nesting_ == 0);
}
void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
-
+ if (!FLAG_enable_ool_constant_pool) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ DCHECK(num_pending_32_bit_reloc_info_ == 0);
+ DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ }
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
}
void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -385,7 +523,7 @@
int Assembler::GetBranchOffset(Instr instr) {
- ASSERT(IsBranch(instr));
+ DCHECK(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign extend it and multiply it
// with 4 to get the offset in bytes.
return ((instr & kImm24Mask) << 8) >> 6;
@@ -397,19 +535,33 @@
}
+bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
+ return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
+}
+
+
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
- ASSERT(IsLdrRegisterImmediate(instr));
+ DCHECK(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
int offset = instr & kOff12Mask; // Zero extended offset.
return positive ? offset : -offset;
}
+int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
+ DCHECK(IsVldrDRegisterImmediate(instr));
+ bool positive = (instr & B23) == B23;
+ int offset = instr & kOff8Mask; // Zero extended offset.
+ offset <<= 2;
+ return positive ? offset : -offset;
+}
+
+
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsLdrRegisterImmediate(instr));
+ DCHECK(IsLdrRegisterImmediate(instr));
bool positive = offset >= 0;
if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
+ DCHECK(is_uint12(offset));
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
@@ -417,16 +569,29 @@
}
+Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
+ DCHECK(IsVldrDRegisterImmediate(instr));
+ DCHECK((offset & ~3) == offset); // Must be 64-bit aligned.
+ bool positive = offset >= 0;
+ if (!positive) offset = -offset;
+ DCHECK(is_uint10(offset));
+ // Set bit indicating whether the offset should be added.
+ instr = (instr & ~B23) | (positive ? B23 : 0);
+ // Set the actual offset. Its bottom 2 bits are zero.
+ return (instr & ~kOff8Mask) | (offset >> 2);
+}
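+
The vldr offset handling above packs a word-aligned byte offset into an 8-bit field scaled by 4, with the sign carried by the U bit (B23). A standalone round-trip sketch of that encoding (the mask values are assumptions consistent with the kOff8Mask usage above):

#include <cstdint>
#include <cassert>

const uint32_t kB23 = 1u << 23;    // U bit: offset is added when set
const uint32_t kOff8Mask = 0xFFu;  // assumed 8-bit offset field

// Model of Set/GetVldrDRegisterImmediateOffset.
uint32_t SetOffset(uint32_t instr, int offset) {
  assert((offset & 3) == 0);  // must be a multiple of 4
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  assert(static_cast<uint32_t>(offset >> 2) <= kOff8Mask);
  instr = (instr & ~kB23) | (positive ? kB23 : 0);
  return (instr & ~kOff8Mask) | (offset >> 2);  // bottom 2 bits are implicit
}

int GetOffset(uint32_t instr) {
  int offset = (instr & kOff8Mask) << 2;
  return (instr & kB23) ? offset : -offset;
}

int main() {
  // The 8-bit scaled field covers +/-1020 bytes in steps of 4.
  for (int off = -1020; off <= 1020; off += 4)
    assert(GetOffset(SetOffset(0, off)) == off);
  return 0;
}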
+
+
bool Assembler::IsStrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}
Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsStrRegisterImmediate(instr));
+ DCHECK(IsStrRegisterImmediate(instr));
bool positive = offset >= 0;
if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
+ DCHECK(is_uint12(offset));
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
@@ -440,9 +605,9 @@
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsAddRegisterImmediate(instr));
- ASSERT(offset >= 0);
- ASSERT(is_uint12(offset));
+ DCHECK(IsAddRegisterImmediate(instr));
+ DCHECK(offset >= 0);
+ DCHECK(is_uint12(offset));
// Set the offset.
return (instr & ~kOff12Mask) | offset;
}
@@ -469,6 +634,24 @@
}
+Instr Assembler::GetConsantPoolLoadPattern() {
+ if (FLAG_enable_ool_constant_pool) {
+ return kLdrPpImmedPattern;
+ } else {
+ return kLdrPCImmedPattern;
+ }
+}
+
+
+Instr Assembler::GetConsantPoolLoadMask() {
+ if (FLAG_enable_ool_constant_pool) {
+ return kLdrPpImmedMask;
+ } else {
+ return kLdrPCImmedMask;
+ }
+}
+
+
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
@@ -502,7 +685,52 @@
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
+ return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
+}
+
+
+bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp +/- offset_12].
+ return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
+}
+
+
+bool Assembler::IsLdrPpRegOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp, +/- <Rm>].
+ return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
+}
+
+
+Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
+
+
+bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pc +/- offset_10].
+ return (instr & kVldrDPCMask) == kVldrDPCPattern;
+}
+
+
+bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pp +/- offset_10].
+ return (instr & kVldrDPpMask) == kVldrDPpPattern;
+}
+
+
+bool Assembler::IsBlxReg(Instr instr) {
+ // Check the instruction is indeed a
+ // blxcc <Rm>
+ return (instr & kBlxRegMask) == kBlxRegPattern;
+}
+
+
+bool Assembler::IsBlxIp(Instr instr) {
+ // Check the instruction is indeed a
+ // blx ip
+ return instr == kBlxIp;
}
@@ -525,16 +753,17 @@
Register Assembler::GetCmpImmediateRegister(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
+ DCHECK(IsCmpImmediate(instr));
return GetRn(instr);
}
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
+ DCHECK(IsCmpImmediate(instr));
return instr & kOff12Mask;
}
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -544,19 +773,22 @@
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
+//
+// The linked labels form a link chain by making the branch offset
+// in the instruction stream point to the previous branch
+// instruction using the same label.
+//
+// The link chain is terminated by a branch offset pointing to the
+// same position.
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
-
-int Assembler::target_at(int pos) {
+int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (is_uint24(instr)) {
+ // Emitted link to a label, not part of a branch.
+ return instr;
}
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
((instr & B24) != 0)) {
@@ -569,25 +801,86 @@
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ if (is_uint24(instr)) {
+ DCHECK(target_pos == pos || target_pos >= 0);
+ // Emitted link to a label, not part of a branch.
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+
+ // Here are the instructions we need to emit:
+ // For ARMv7: target24 => target16_1:target16_0
+ // movw dst, #target16_0
+ // movt dst, #target16_1
+ // For ARMv6: target24 => target8_2:target8_1:target8_0
+ // mov dst, #target8_0
+ // orr dst, dst, #target8_1 << 8
+ // orr dst, dst, #target8_2 << 16
+
+ // We extract the destination register from the emitted nop instruction.
+ Register dst = Register::from_code(
+ Instruction::RmValue(instr_at(pos + kInstrSize)));
+ DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
+ uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ DCHECK(is_uint24(target24));
+ if (is_uint8(target24)) {
+ // If the target fits in a byte then only patch with a mov
+ // instruction.
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target24));
+ } else {
+ uint16_t target16_0 = target24 & kImm16Mask;
+ uint16_t target16_1 = target24 >> 16;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Patch with movw/movt.
+ if (target16_1 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ patcher.masm()->movt(dst, target16_1);
+ }
+ } else {
+ // Patch with a sequence of mov/orr/orr instructions.
+ uint8_t target8_0 = target16_0 & kImm8Mask;
+ uint8_t target8_1 = target16_0 >> 8;
+ uint8_t target8_2 = target16_1 & kImm8Mask;
+ if (target8_2 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 3,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
+ }
+ }
+ }
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
- ASSERT((imm26 & 1) == 0);
+ DCHECK((imm26 & 1) == 0);
instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
} else {
- ASSERT((imm26 & 3) == 0);
+ DCHECK((imm26 & 3) == 0);
instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
- ASSERT(is_int24(imm24));
+ DCHECK(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
@@ -606,7 +899,7 @@
if ((instr & ~kImm24Mask) == 0) {
PrintF("value\n");
} else {
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx
Condition cond = Instruction::ConditionField(instr);
const char* b;
const char* c;
@@ -651,7 +944,7 @@
void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
while (L->is_linked()) {
int fixup_pos = L->pos();
next(L); // call next before overwriting link with target at fixup_pos
@@ -666,51 +959,26 @@
}
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
+ DCHECK(!L->is_bound()); // label can only be bound once
bind_to(L, pc_offset());
}
void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
+ DCHECK(L->is_linked());
int link = target_at(L->pos());
- if (link == kEndOfChain) {
+ if (link == L->pos()) {
+ // Branch target points to the same instruction. This is the end of the link
+ // chain.
L->Unuse();
} else {
- ASSERT(link >= 0);
+ DCHECK(link >= 0);
L->link_to(link);
}
}
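
The chain-termination convention used by next() above (a link that points at its own position marks the end, replacing the old kEndOfChain = -4 sentinel removed earlier in this patch) can be illustrated with a toy chain, modeling target_at as a plain array lookup:

#include <cassert>
#include <vector>

// Toy model: links[pos] holds the position of the previous use of the label;
// the chain ends where an entry points at itself (the new convention), not
// at a negative sentinel (the old kEndOfChain = -4 convention).
int main() {
  std::vector<int> links(32, 0);
  links[24] = 12;  // third use links back to second
  links[12] = 4;   // second use links back to first
  links[4]  = 4;   // first use points at itself: end of chain

  int pos = 24, uses = 0;
  while (true) {
    ++uses;
    int link = links[pos];   // target_at(pos)
    if (link == pos) break;  // self-reference terminates the chain
    pos = link;              // next(L): follow to the previous use
  }
  assert(uses == 3 && pos == 4);
  return 0;
}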
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
@@ -739,14 +1007,14 @@
if (CpuFeatures::IsSupported(ARMv7)) {
if (imm32 < 0x10000) {
*instr ^= kMovwLeaveCCFlip;
- *instr |= EncodeMovwImmediate(imm32);
+ *instr |= Assembler::EncodeMovwImmediate(imm32);
*rotate_imm = *immed_8 = 0; // Not used for movw.
return true;
}
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
@@ -754,7 +1022,7 @@
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
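
fits_shifter ultimately asks whether imm32 is expressible in ARM's operand2 format: an 8-bit value rotated right by an even amount. A standalone sketch of that core test, as a brute-force check over the 16 possible rotations, independent of the instruction-flipping tricks above:

#include <cstdint>
#include <cassert>

// True if imm can be encoded as imm8 ROR (2 * rotate), the ARM operand2
// immediate format that fits_shifter tests for.
bool FitsRotatedImmediate(uint32_t imm, uint32_t* rotate, uint32_t* imm8) {
  for (uint32_t r = 0; r < 32; r += 2) {
    // Rotate left by r to undo a rotate-right-by-r encoding.
    uint32_t v = (r == 0) ? imm : (imm << r) | (imm >> (32 - r));
    if (v <= 0xFFu) {
      *rotate = r / 2;  // 4-bit rotate field
      *imm8 = v;        // 8-bit immediate field
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  assert(FitsRotatedImmediate(0xFF000000u, &rot, &imm8));   // 0xFF ROR 8
  assert(!FitsRotatedImmediate(0x12345678u, &rot, &imm8));  // needs >8 bits
  return 0;
}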
@@ -775,50 +1043,121 @@
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Operand::must_use_constant_pool() const {
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif // def DEBUG
- return Serializer::enabled();
- } else if (rmode_ == RelocInfo::NONE) {
+ if (assembler != NULL && assembler->predictable_code_size()) return true;
+ return assembler != NULL && assembler->serializer_enabled();
+ } else if (RelocInfo::IsNone(rmode_)) {
return false;
}
return true;
}
-bool Operand::is_single_instruction(Instr instr) const {
- if (rm_.is_valid()) return true;
+static bool use_mov_immediate_load(const Operand& x,
+ const Assembler* assembler) {
+ if (assembler != NULL && !assembler->is_constant_pool_available()) {
+ return true;
+ } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size())) {
+ // Prefer movw / movt to constant pool if it is more efficient on the CPU.
+ return true;
+ } else if (x.must_output_reloc_info(assembler)) {
+ // Prefer constant pool if data is likely to be patched.
+ return false;
+ } else {
+ // Otherwise, use immediate load if movw / movt is available.
+ return CpuFeatures::IsSupported(ARMv7);
+ }
+}
+
+
+int Operand::instructions_required(const Assembler* assembler,
+ Instr instr) const {
+ if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2;
- if (must_use_constant_pool() ||
+ if (must_output_reloc_info(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
- // constant pool is required. For a mov instruction not setting the
- // condition code additional instruction conventions can be used.
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool() ||
- !CpuFeatures::IsSupported(ARMv7)) {
- // mov instruction will be an ldr from constant pool (one instruction).
- return true;
- } else {
- // mov instruction will be a mov or movw followed by movt (two
- // instructions).
- return false;
- }
+ // constant pool is required. First account for the instructions required
+ // for the constant pool or immediate load
+ int instructions;
+ if (use_mov_immediate_load(*this, assembler)) {
+ // A movw / movt or mov / orr immediate load.
+ instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
+ } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
+ // An extended constant pool load.
+ instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
- // If this is not a mov or mvn instruction there will always an additional
- // instructions - either mov or ldr. The mov might actually be two
- // instructions mov or movw followed by movt so including the actual
- // instruction two or three instructions will be generated.
- return false;
+ // A small constant pool load.
+ instructions = 1;
}
+
+ if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
+      // Only a mov instruction which doesn't set the condition code can be
+      // replaced by the constant pool or immediate load on its own; for any
+      // other instruction we must also count the instruction being
+      // requested.
+ instructions += 1;
+ }
+ return instructions;
} else {
// No use of constant pool and the immediate operand can be encoded as a
// shifter operand.
- return true;
+ return 1;
+ }
+}
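+
+// Worked examples of the counts above (informal): for an immediate that
+// fits_shifter rejects, on ARMv7 with the movw/movt path chosen,
+//   mov r0, #imm      -> movw + movt        == 2 instructions
+//   add r0, r1, #imm  -> movw + movt + add  == 3 instructions
+// whereas a small-section constant pool load is a single ldr, giving 1
+// and 2 instructions for the same two operations.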
+
+
+void Assembler::move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond) {
+ RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
+ }
+
+ if (use_mov_immediate_load(x, this)) {
+ Register target = rd.code() == pc.code() ? ip : rd;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ movw(target, imm32 & 0xffff, cond);
+ movt(target, imm32 >> 16, cond);
+ } else {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
+ orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
+ orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
+ orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
+ }
+ if (target.code() != rd.code()) {
+ mov(rd, target, LeaveCC, cond);
+ }
+ } else {
+ DCHECK(is_constant_pool_available());
+ ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+ if (section == ConstantPoolArray::EXTENDED_SECTION) {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // Emit instructions to load constant pool offset.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ movw(target, 0, cond);
+ movt(target, 0, cond);
+ } else {
+ mov(target, Operand(0), LeaveCC, cond);
+ orr(target, target, Operand(0), LeaveCC, cond);
+ orr(target, target, Operand(0), LeaveCC, cond);
+ orr(target, target, Operand(0), LeaveCC, cond);
+ }
+ // Load from constant pool at offset.
+ ldr(rd, MemOperand(pp, target), cond);
+ } else {
+ DCHECK(section == ConstantPoolArray::SMALL_SECTION);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
}
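+
+// Sketch of the sequences emitted above, assuming x.imm32_ == 0x12345678
+// and the immediate load path is chosen on ARMv7:
+//   movw target, #0x5678   ; target = 0x00005678
+//   movt target, #0x1234   ; target = 0x12345678
+// On the constant pool path a single pc- or pp-relative ldr is emitted
+// instead and its offset is patched when the pool is written out.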
@@ -828,12 +1167,12 @@
Register rd,
const Operand& x) {
CheckBuffer();
- ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
+ DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_use_constant_pool() ||
+ if (x.must_output_reloc_info(this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@@ -842,25 +1181,9 @@
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() ||
- !CpuFeatures::IsSupported(ARMv7)) {
- RecordRelocInfo(x.rmode_, x.imm32_);
- ldr(rd, MemOperand(pc, 0), cond);
- } else {
- // Will probably use movw, will certainly not use constant pool.
- mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- }
+ move_32_bit_immediate(rd, x, cond);
} else {
- // If this is not a mov or mvn instruction we may still be able to avoid
- // a constant pool entry by using mvn or movw.
- if (!x.must_use_constant_pool() &&
- (instr & kMovMvnMask) != kMovMvnPattern) {
- mov(ip, x, LeaveCC, cond);
- } else {
- RecordRelocInfo(x.rmode_, x.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- }
+ mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -871,7 +1194,7 @@
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} else {
// Register shift.
- ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+ DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
@@ -883,7 +1206,7 @@
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | B | L)) == B26);
+ DCHECK((instr & ~(kCondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -895,28 +1218,28 @@
if (!is_uint12(offset_12)) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
- ASSERT(offset_12 >= 0); // no masking needed
+ DCHECK(offset_12 >= 0); // no masking needed
instr |= offset_12;
} else {
// Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_
// and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
+ DCHECK(!x.rm_.is(pc));
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
- ASSERT(x.rn_.is_valid());
+ DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
+ DCHECK(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -928,60 +1251,60 @@
if (!is_uint8(offset_8)) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
- ASSERT(offset_8 >= 0); // no masking needed
+ DCHECK(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
// Scaled register offset not supported, load index first
// rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
// Register offset.
- ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
instr |= x.rm_.code();
}
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
- ASSERT(rl != 0);
- ASSERT(!rn.is(pc));
+ DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
+ DCHECK(rl != 0);
+ DCHECK(!rn.is(pc));
emit(instr | rn.code()*B16 | rl);
}
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// Unindexed addressing is not encoded by this function.
- ASSERT_EQ((B27 | B26),
+ DCHECK_EQ((B27 | B26),
(instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
- ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+ DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
- ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
+ DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset
offset_8 >>= 2;
if (offset_8 < 0) {
offset_8 = -offset_8;
am ^= U;
}
- ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
+ DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
// Post-indexed addressing requires W == 1; different than in addrmod2/3.
if ((am & P) == 0)
am |= W;
- ASSERT(offset_8 >= 0); // no masking needed
+ DCHECK(offset_8 >= 0); // no masking needed
emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
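+
+// Worked example: offset == -8 is stored as offset_8 == 2 with the U bit
+// flipped to subtract, since the encoding holds an unsigned word count
+// plus a direction bit rather than a signed byte offset.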
@@ -992,9 +1315,11 @@
target_pos = L->pos();
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+      // Point to the previous instruction that uses the link.
+ target_pos = L->pos();
} else {
- target_pos = kEndOfChain;
+ // First entry of the link chain points to itself.
+ target_pos = pc_offset();
}
L->link_to(pc_offset());
}
@@ -1006,27 +1331,11 @@
}
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
+ DCHECK((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
+ DCHECK(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
@@ -1038,33 +1347,33 @@
void Assembler::bl(int branch_offset, Condition cond) {
positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 3) == 0);
+ DCHECK((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
+ DCHECK(is_int24(imm24));
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
void Assembler::blx(int branch_offset) { // v5 and above
positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 1) == 0);
+ DCHECK((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
+ DCHECK(is_int24(imm24));
emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
void Assembler::blx(Register target, Condition cond) { // v5 and above
positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc));
+ DCHECK(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
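+
+// Encoding example for the branches above: branch_offset == 1024 is stored
+// as imm24 == 256 (1024 >> 2); 24 signed bits of word offset give a reach
+// of roughly +/-32MB.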
@@ -1136,7 +1445,7 @@
void Assembler::cmp_raw_immediate(
Register src, int raw_immediate, Condition cond) {
- ASSERT(is_uint12(raw_immediate));
+ DCHECK(is_uint12(raw_immediate));
emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
@@ -1159,18 +1468,58 @@
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
- ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
+ DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
addrmod1(cond | MOV | s, r0, dst, src);
}
+void Assembler::mov_label_offset(Register dst, Label* label) {
+ if (label->is_bound()) {
+ mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+ } else {
+ // Emit the link to the label in the code stream followed by extra nop
+ // instructions.
+ // If the label is not linked, then start a new link chain by linking it to
+ // itself, emitting pc_offset().
+ int link = label->is_linked() ? label->pos() : pc_offset();
+ label->link_to(pc_offset());
+
+ // When the label is bound, these instructions will be patched with a
+ // sequence of movw/movt or mov/orr/orr instructions. They will load the
+ // destination register with the position of the label from the beginning
+ // of the code.
+ //
+ // The link will be extracted from the first instruction and the destination
+ // register from the second.
+ // For ARMv7:
+ // link
+ // mov dst, dst
+ // For ARMv6:
+ // link
+ // mov dst, dst
+ // mov dst, dst
+ //
+ // When the label gets bound: target_at extracts the link and target_at_put
+ // patches the instructions.
+ DCHECK(is_uint24(link));
+ BlockConstPoolScope block_const_pool(this);
+ emit(link);
+ nop(dst.code());
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ nop(dst.code());
+ }
+ }
+}
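+
+// A minimal usage sketch (hypothetical label name): materialize a
+// code-relative position that is only known once the label is bound.
+//   Label done;
+//   mov_label_offset(r0, &done);  // patched when bind() is reached
+//   ...
+//   bind(&done);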
+
+
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
- ASSERT(immediate < 0x10000);
- mov(reg, Operand(immediate), LeaveCC, cond);
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
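+
+// Usage sketch: movw zero-extends and movt replaces only the top half, so
+//   movw(r0, 0xbeef);  // r0 = 0x0000beef
+//   movt(r0, 0xdead);  // r0 = 0xdeadbeef
+// loads any 32-bit constant in two instructions on ARMv7.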
@@ -1189,15 +1538,42 @@
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
+void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond) {
+ DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ DCHECK(IsEnabled(MLS));
+ emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::sdiv(Register dst, Register src1, Register src2,
+ Condition cond) {
+ DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(IsEnabled(SUDIV));
+  emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf * B12 |
+ src2.code()*B8 | B4 | src1.code());
+}
+
+
+void Assembler::udiv(Register dst, Register src1, Register src2,
+ Condition cond) {
+ DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(IsEnabled(SUDIV));
+ emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
+ src2.code() * B8 | B4 | src1.code());
+}
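+
+// Usage sketch (assuming SUDIV and MLS are enabled): a full signed divmod
+// without a runtime call.
+//   sdiv(r0, r1, r2);     // r0 = r1 / r2
+//   mls(r3, r0, r2, r1);  // r3 = r1 - r0 * r2, i.e. the remainder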
+
+
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
// dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1209,8 +1585,8 @@
Register src2,
SBit s,
Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
+ DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(!dstL.is(dstH));
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1222,8 +1598,8 @@
Register src2,
SBit s,
Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
+ DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(!dstL.is(dstH));
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1235,8 +1611,8 @@
Register src2,
SBit s,
Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
+ DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(!dstL.is(dstH));
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1248,8 +1624,8 @@
Register src2,
SBit s,
Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
+ DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(!dstL.is(dstH));
emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1258,7 +1634,7 @@
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
- ASSERT(!dst.is(pc) && !src.is(pc));
+ DCHECK(!dst.is(pc) && !src.is(pc));
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
15*B8 | CLZ | src.code());
}
@@ -1272,11 +1648,11 @@
const Operand& src,
Condition cond) {
// v6 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.rm_.is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
- ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
- ASSERT(src.rs_.is(no_reg));
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(!dst.is(pc) && !src.rm_.is(pc));
+ DCHECK((satpos >= 0) && (satpos <= 31));
+ DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
+ DCHECK(src.rs_.is(no_reg));
int sh = 0;
if (src.shift_op_ == ASR) {
@@ -1300,10 +1676,10 @@
int width,
Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK((lsb >= 0) && (lsb <= 31));
+ DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
lsb*B7 | B6 | B4 | src.code());
}
@@ -1320,10 +1696,10 @@
int width,
Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK((lsb >= 0) && (lsb <= 31));
+ DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
lsb*B7 | B6 | B4 | src.code());
}
@@ -1335,10 +1711,10 @@
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(!dst.is(pc));
+ DCHECK((lsb >= 0) && (lsb <= 31));
+ DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
@@ -1354,42 +1730,148 @@
int width,
Condition cond) {
// v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK((lsb >= 0) && (lsb <= 31));
+ DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
src.code());
}
+void Assembler::pkhbt(Register dst,
+ Register src1,
+ const Operand& src2,
+                    Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+ // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+ // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src1.is(pc));
+ DCHECK(!src2.rm().is(pc));
+ DCHECK(!src2.rm().is(no_reg));
+ DCHECK(src2.rs().is(no_reg));
+ DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
+ DCHECK(src2.shift_op() == LSL);
+ emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+ src2.shift_imm_*B7 | B4 | src2.rm().code());
+}
+
+
+void Assembler::pkhtb(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+ // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+ // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src1.is(pc));
+ DCHECK(!src2.rm().is(pc));
+ DCHECK(!src2.rm().is(no_reg));
+ DCHECK(src2.rs().is(no_reg));
+ DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
+ DCHECK(src2.shift_op() == ASR);
+ int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
+ emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+ asr*B7 | B6 | B4 | src2.rm().code());
+}
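+
+// Semantics sketch: pkhbt/pkhtb pack two halfwords into one register.
+//   pkhbt(r0, r1, Operand(r2, LSL, 16));  // r0 = (r2 << 16) | (r1 & 0xffff)
+//   pkhtb(r0, r1, Operand(r2, ASR, 16));  // r0 = (r1 & 0xffff0000) |
+//                                         //      ((r2 >> 16) & 0xffff)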
+
+
+void Assembler::uxtb(Register dst,
+ const Operand& src,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.274.
+ // cond(31-28) | 01101110(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src.rm().is(pc));
+ DCHECK(!src.rm().is(no_reg));
+ DCHECK(src.rs().is(no_reg));
+ DCHECK((src.shift_imm_ == 0) ||
+ (src.shift_imm_ == 8) ||
+ (src.shift_imm_ == 16) ||
+ (src.shift_imm_ == 24));
+ // Operand maps ROR #0 to LSL #0.
+ DCHECK((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
+ emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
+ ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
+
+
+void Assembler::uxtab(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.271.
+ // cond(31-28) | 01101110(27-20) | Rn(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src1.is(pc));
+ DCHECK(!src2.rm().is(pc));
+ DCHECK(!src2.rm().is(no_reg));
+ DCHECK(src2.rs().is(no_reg));
+ DCHECK((src2.shift_imm_ == 0) ||
+ (src2.shift_imm_ == 8) ||
+ (src2.shift_imm_ == 16) ||
+ (src2.shift_imm_ == 24));
+ // Operand maps ROR #0 to LSL #0.
+ DCHECK((src2.shift_op() == ROR) ||
+ ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
+ emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
+       ((src2.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb16(Register dst,
+ const Operand& src,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.275.
+ // cond(31-28) | 01101100(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ DCHECK(!dst.is(pc));
+ DCHECK(!src.rm().is(pc));
+ DCHECK(!src.rm().is(no_reg));
+ DCHECK(src.rs().is(no_reg));
+ DCHECK((src.shift_imm_ == 0) ||
+ (src.shift_imm_ == 8) ||
+ (src.shift_imm_ == 16) ||
+ (src.shift_imm_ == 24));
+ // Operand maps ROR #0 to LSL #0.
+ DCHECK((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
+ emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
+ ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
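+
+// Semantics sketch for the zero-extend family above:
+//   uxtb(r0, Operand(r1));           // r0 = r1 & 0xff
+//   uxtb(r0, Operand(r1, ROR, 8));   // r0 = (r1 >> 8) & 0xff
+//   uxtab(r0, r1, Operand(r2));      // r0 = r1 + (r2 & 0xff)
+//   uxtb16(r0, Operand(r1));         // r0 = r1 & 0x00ff00ff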
+
+
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- ASSERT(!dst.is(pc));
+ DCHECK(!dst.is(pc));
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
- ASSERT(fields >= B16 && fields < B20); // at least one field set
+ DCHECK(fields >= B16 && fields < B20); // at least one field set
Instr instr;
if (!src.rm_.is_valid()) {
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_use_constant_pool() ||
+ if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
+ move_32_bit_immediate(ip, src);
msr(fields, Operand(ip), cond);
return;
}
instr = I | rotate_imm*B8 | immed_8;
} else {
- ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
instr = src.rm_.code();
}
emit(cond | instr | B24 | B21 | fields | 15*B12);
@@ -1442,32 +1924,52 @@
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
+ DCHECK(IsEnabled(ARMv7));
+ DCHECK(src.rm().is(no_reg));
+ DCHECK(!dst1.is(lr)); // r14.
+ DCHECK_EQ(0, dst1.code() % 2);
+ DCHECK_EQ(dst1.code() + 1, dst2.code());
addrmod3(cond | B7 | B6 | B4, dst1, src);
}
void Assembler::strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
+ DCHECK(dst.rm().is(no_reg));
+ DCHECK(!src1.is(lr)); // r14.
+ DCHECK_EQ(0, src1.code() % 2);
+ DCHECK_EQ(src1.code() + 1, src2.code());
+ DCHECK(IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
+
+// Preload instructions.
+void Assembler::pld(const MemOperand& address) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.128.
+ // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
+  // 1111(15-12) | imm5(11-7) | type(6-5) | 0(4) | Rm(3-0) |
+ DCHECK(address.rm().is(no_reg));
+ DCHECK(address.am() == Offset);
+ int U = B23;
+ int offset = address.offset();
+ if (offset < 0) {
+ offset = -offset;
+ U = 0;
+ }
+ DCHECK(offset < 4096);
+ emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
+ 0xf*B12 | offset);
+}
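+
+// Usage sketch: hint a prefetch of the cache line 64 bytes past r0 before
+// a copy loop touches it.
+//   pld(MemOperand(r0, 64));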
+
+
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
RegList dst,
Condition cond) {
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
- ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+ DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
addrmod4(cond | B27 | am | L, base, dst);
@@ -1496,7 +1998,7 @@
// enabling/disabling and a counter feature. See simulator-arm.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
- ASSERT(code >= kDefaultStopCode);
+ DCHECK(code >= kDefaultStopCode);
{
// The Simulator will handle the stop instruction and get the message
// address. It expects to find the address just after the svc instruction.
@@ -1509,7 +2011,6 @@
emit(reinterpret_cast<Instr>(msg));
}
#else // def __arm__
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
Label skip;
b(&skip, NegateCondition(cond));
@@ -1518,21 +2019,18 @@
} else {
bkpt(0);
}
-#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
- svc(0x9f0001, cond);
-#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif // def __arm__
}
void Assembler::bkpt(uint32_t imm16) { // v5 and above
- ASSERT(is_uint16(imm16));
+ DCHECK(is_uint16(imm16));
emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
void Assembler::svc(uint32_t imm24, Condition cond) {
- ASSERT(is_uint24(imm24));
+ DCHECK(is_uint24(imm24));
emit(cond | 15*B24 | imm24);
}
@@ -1545,7 +2043,7 @@
CRegister crm,
int opcode_2,
Condition cond) {
- ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+ DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
@@ -1568,7 +2066,7 @@
CRegister crm,
int opcode_2,
Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
@@ -1591,7 +2089,7 @@
CRegister crm,
int opcode_2,
Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
@@ -1623,7 +2121,7 @@
LFlag l,
Condition cond) {
// Unindexed addressing.
- ASSERT(is_uint8(option));
+ DCHECK(is_uint8(option));
emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
}
@@ -1653,30 +2151,31 @@
int offset,
const Condition cond) {
// Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ // Instruction details available in ARM DDI 0406C.b, A8-924.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | offset
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
+ int vd, d;
+ dst.split_code(&vd, &d);
- ASSERT(offset >= 0);
+ DCHECK(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
- ASSERT(!base.is(ip));
+ DCHECK(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
@@ -1684,9 +2183,14 @@
void Assembler::vldr(const DwVfpRegister dst,
const MemOperand& operand,
const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
+ DCHECK(operand.am_ == Offset);
+ if (operand.rm().is_valid()) {
+ add(ip, operand.rn(),
+ Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+ vldr(dst, ip, 0, cond);
+ } else {
+ vldr(dst, operand.rn(), operand.offset(), cond);
+ }
}
@@ -1698,7 +2202,6 @@
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1706,7 +2209,7 @@
}
int sd, d;
dst.split_code(&sd, &d);
- ASSERT(offset >= 0);
+ DCHECK(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
@@ -1714,7 +2217,7 @@
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
- ASSERT(!base.is(ip));
+ DCHECK(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
@@ -1728,9 +2231,14 @@
void Assembler::vldr(const SwVfpRegister dst,
const MemOperand& operand,
const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
+ DCHECK(operand.am_ == Offset);
+ if (operand.rm().is_valid()) {
+ add(ip, operand.rn(),
+ Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+ vldr(dst, ip, 0, cond);
+ } else {
+ vldr(dst, operand.rn(), operand.offset(), cond);
+ }
}
@@ -1739,29 +2247,31 @@
int offset,
const Condition cond) {
// MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ // Instruction details available in ARM DDI 0406C.b, A8-1082.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | (offset/4)
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
- ASSERT(offset >= 0);
+ DCHECK(offset >= 0);
+ int vd, d;
+ src.split_code(&vd, &d);
+
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
+ ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
- ASSERT(!base.is(ip));
+ DCHECK(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
@@ -1769,9 +2279,14 @@
void Assembler::vstr(const DwVfpRegister src,
const MemOperand& operand,
const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vstr(src, operand.rn(), operand.offset(), cond);
+ DCHECK(operand.am_ == Offset);
+ if (operand.rm().is_valid()) {
+ add(ip, operand.rn(),
+ Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+ vstr(src, ip, 0, cond);
+ } else {
+ vstr(src, operand.rn(), operand.offset(), cond);
+ }
}
@@ -1783,7 +2298,6 @@
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1791,14 +2305,14 @@
}
int sd, d;
src.split_code(&sd, &d);
- ASSERT(offset >= 0);
+ DCHECK(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
- ASSERT(!base.is(ip));
+ DCHECK(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
@@ -1812,9 +2326,14 @@
void Assembler::vstr(const SwVfpRegister src,
const MemOperand& operand,
const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(src, operand.rn(), operand.offset(), cond);
+ DCHECK(operand.am_ == Offset);
+ if (operand.rm().is_valid()) {
+ add(ip, operand.rn(),
+ Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+ vstr(src, ip, 0, cond);
+ } else {
+ vstr(src, operand.rn(), operand.offset(), cond);
+ }
}
@@ -1823,17 +2342,17 @@
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-626.
+ // Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
+ // first(15-12) | 1011(11-8) | (count * 2)
+ DCHECK_LE(first.code(), last.code());
+ DCHECK(am == ia || am == ia_w || am == db_w);
+ DCHECK(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ DCHECK(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1844,17 +2363,17 @@
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-784.
+ // Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
+ DCHECK_LE(first.code(), last.code());
+ DCHECK(am == ia || am == ia_w || am == db_w);
+ DCHECK(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ DCHECK(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1867,10 +2386,9 @@
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
+ DCHECK_LE(first.code(), last.code());
+ DCHECK(am == ia || am == ia_w || am == db_w);
+ DCHECK(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
@@ -1888,10 +2406,9 @@
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
+ DCHECK_LE(first.code(), last.code());
+ DCHECK(am == ia || am == ia_w || am == db_w);
+ DCHECK(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
@@ -1900,6 +2417,7 @@
0xA*B8 | count);
}
+
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -1908,10 +2426,11 @@
*hi = i >> 32;
}
+
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ DCHECK(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
//
@@ -1961,36 +2480,79 @@
void Assembler::vmov(const DwVfpRegister dst,
double imm,
- const Condition cond) {
- // Dd = immediate
- // Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
-
+ const Register scratch) {
uint32_t enc;
- if (FitsVMOVDoubleImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+ //
+ // Dd = immediate
+ // Instruction details available in ARM DDI 0406C.b, A8-936.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
+ } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
+ // TODO(jfb) Temporarily turned off until we have constant blinding or
+ // some equivalent mitigation: an attacker can otherwise control
+ // generated data which also happens to be executable, a Very Bad
+ // Thing indeed.
+ // Blinding gets tricky because we don't have xor, we probably
+ // need to add/subtract without losing precision, which requires a
+ // cookie value that Lithium is probably better positioned to
+ // choose.
+ // We could also add a few peepholes here like detecting 0.0 and
+ // -0.0 and doing a vmov from the sequestered d14, forcing denorms
+ // to zero (we set flush-to-zero), and normalizing NaN values.
+ // We could also detect redundant values.
+ // The code could also randomize the order of values, though
+ // that's tricky because vldr has a limited reach. Furthermore
+ // it breaks load locality.
+ RelocInfo rinfo(pc_, imm);
+ ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+ if (section == ConstantPoolArray::EXTENDED_SECTION) {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ // Emit instructions to load constant pool offset.
+ movw(ip, 0);
+ movt(ip, 0);
+ // Load from constant pool at offset.
+ vldr(dst, MemOperand(pp, ip));
+ } else {
+ DCHECK(section == ConstantPoolArray::SMALL_SECTION);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+ }
} else {
- // Synthesise the double from ARM immediates. This could be implemented
- // using vldr from a constant pool.
+ // Synthesise the double from ARM immediates.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
- if (lo == hi) {
- // If the lo and hi parts of the double are equal, the literal is easier
- // to create. This is the case with 0.0.
- mov(ip, Operand(lo));
- vmov(dst, ip, ip);
- } else {
- // Move the low part of the double into the lower of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(lo));
- vmov(dst.low(), ip, cond);
+ if (scratch.is(no_reg)) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+        // Move the low part of the double into the lower of the
+        // corresponding S registers of D register dst.
+ mov(ip, Operand(lo));
+ vmov(loc.low(), ip);
- // Move the high part of the double into the higher of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip, cond);
+ // Move the high part of the double into the higher of the
+      // corresponding S registers of D register dst.
+ mov(ip, Operand(hi));
+ vmov(loc.high(), ip);
+ } else {
+ // D16-D31 does not have S registers, so move the low and high parts
+ // directly to the D register using vmov.32.
+ // Note: This may be slower, so we only do this when we have to.
+ mov(ip, Operand(lo));
+ vmov(dst, VmovIndexLo, ip);
+ mov(ip, Operand(hi));
+ vmov(dst, VmovIndexHi, ip);
+ }
+ } else {
+ // Move the low and high parts of the double to a D register in one
+ // instruction.
+ mov(ip, Operand(lo));
+ mov(scratch, Operand(hi));
+ vmov(dst, ip, scratch);
}
}
}
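+
+// Examples of the three paths above (informal): vmov(d0, 1.0) encodes
+// directly on VFP3, as does 0.5 (both have the required +/-m * 2^e form);
+// any other double either comes from the constant pool via vldr or is
+// synthesized in the D register from its two 32-bit halves.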
@@ -2001,7 +2563,6 @@
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@@ -2013,10 +2574,47 @@
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
- // Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-938.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
+ const VmovIndex index,
+ const Register src,
+ const Condition cond) {
+ // Dd[index] = Rt
+ // Instruction details available in ARM DDI 0406C.b, A8-940.
+ // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
+ // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ DCHECK(index.index == 0 || index.index == 1);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
+ d*B7 | B4);
+}
+
+
+void Assembler::vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond) {
+  // Rt = Dn[index]
+ // Instruction details available in ARM DDI 0406C.b, A8.8.342.
+ // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
+ // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ DCHECK(index.index == 0 || index.index == 1);
+ int vn, n;
+ src.split_code(&vn, &n);
+ emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
+ 0xB*B8 | n*B7 | B4);
}
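+
+// Usage sketch (assuming VFP32DREGS, so d16 exists and has no S-register
+// aliases): read both 32-bit halves of a double.
+//   vmov(r0, VmovIndexLo, d16);  // r0 = low word of d16
+//   vmov(r1, VmovIndexHi, d16);  // r1 = high word of d16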
@@ -2025,13 +2623,14 @@
const Register src2,
const Condition cond) {
// Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src1.is(pc) && !src2.is(pc));
+ DCHECK(!src1.is(pc) && !src2.is(pc));
+ int vm, m;
+ dst.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+ src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
@@ -2040,13 +2639,14 @@
const DwVfpRegister src,
const Condition cond) {
// <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ DCHECK(!dst1.is(pc) && !dst2.is(pc));
+ int vm, m;
+ src.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+ dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
@@ -2057,8 +2657,7 @@
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!src.is(pc));
+ DCHECK(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
@@ -2072,8 +2671,7 @@
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(!dst.is(pc));
+ DCHECK(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
@@ -2134,7 +2732,7 @@
int reg_code,
int* vm,
int* m) {
- ASSERT((reg_code >= 0) && (reg_code <= 31));
+ DCHECK((reg_code >= 0) && (reg_code <= 31));
if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
// 32 bit type.
*m = reg_code & 0x1;
@@ -2154,7 +2752,7 @@
const int src_code,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(src_type != dst_type);
+ DCHECK(src_type != dst_type);
int D, Vd, M, Vm;
SplitRegCode(src_type, src_code, &Vm, &M);
SplitRegCode(dst_type, dst_code, &Vd, &D);
@@ -2164,7 +2762,7 @@
// Instruction details available in ARM DDI 0406B, A8.6.295.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
// Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
+ DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
int sz, opc2, op;
@@ -2173,7 +2771,7 @@
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
op = mode;
} else {
- ASSERT(IsIntegerVFPType(src_type));
+ DCHECK(IsIntegerVFPType(src_type));
opc2 = 0x0;
sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
@@ -2197,7 +2795,6 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -2206,7 +2803,6 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@@ -2215,7 +2811,6 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -2224,7 +2819,6 @@
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2233,7 +2827,6 @@
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2242,7 +2835,6 @@
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -2251,24 +2843,56 @@
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+ int fraction_bits,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-874.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
+ // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+ DCHECK(fraction_bits > 0 && fraction_bits <= 32);
+ DCHECK(CpuFeatures::IsSupported(VFP3));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int imm5 = 32 - fraction_bits;
+ int i = imm5 & 1;
+ int imm4 = (imm5 >> 1) & 0xf;
+ emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
+ vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
+}
+
+
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
- 0x5*B9 | B8 | B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-968.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
- 0x5*B9 | B8 | 0x3*B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-524.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
+ m*B5 | vm);
}
@@ -2278,12 +2902,17 @@
const Condition cond) {
// Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-830.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
}
@@ -2293,12 +2922,17 @@
const Condition cond) {
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-1086.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | B6 | m*B5 | vm);
}
@@ -2308,12 +2942,53 @@
const Condition cond) {
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-960.
+ // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
+}
+
+
+void Assembler::vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmls(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
+ m*B5 | vm);
}
@@ -2323,12 +2998,17 @@
const Condition cond) {
// Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-882.
+ // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
}
@@ -2336,26 +3016,29 @@
const DwVfpRegister src2,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ src1.split_code(&vd, &d);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- ASSERT(src2 == 0.0);
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
- src1.code()*B12 | 0x5*B9 | B8 | B6);
+ // vcmp(Dd, #0.0) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
+ DCHECK(src2 == 0.0);
+ int vd, d;
+ src1.split_code(&vd, &d);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
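+
+// Typical compare sketch (hypothetical label `less`): the FPSCR flags must
+// be copied to the APSR before a conditional branch can observe them.
+//   vcmp(d0, d1);
+//   vmrs(pc);      // vmrs to pc moves the FPSCR N, Z, C, V bits to the APSR
+//   b(&less, lt);  // note: lt is also taken when either operand is NaN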
@@ -2363,7 +3046,6 @@
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2373,7 +3055,6 @@
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2382,29 +3063,143 @@
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
- dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-1058.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
+ m*B5 | vm);
+}
+
+
+// Support for NEON.
+
+void Assembler::vld1(NeonSize size,
+ const NeonListOperand& dst,
+ const NeonMemOperand& src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.320.
+ // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
+ // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
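+ // For example, vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r0))
+ // would load d0-d3 with 8-bit elements from the address in r0.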
+ DCHECK(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ dst.base().split_code(&vd, &d);
+ emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
+ dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
+}
+
+
+void Assembler::vst1(NeonSize size,
+ const NeonListOperand& src,
+ const NeonMemOperand& dst) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.404.
+ // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
+ // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+ DCHECK(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ src.base().split_code(&vd, &d);
+ emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
+ size*B6 | dst.align()*B4 | dst.rm().code());
+}
+
+
+void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.346.
+ // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
+ // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
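+ // For example, vmovl(NeonU8, q0, d2) zero-extends the eight bytes in d2
+ // into eight 16-bit lanes of q0 (NeonS8 would sign-extend instead).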
+ DCHECK(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
+ (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
// Pseudo instructions.
void Assembler::nop(int type) {
- // This is mov rx, rx.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
+ // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
+ // part of the CPU's pipeline and still has to issue. Older ARM chips
+ // simply used MOV Rx, Rx as a NOP, and it performs better even on newer
+ // CPUs. We therefore use MOV Rx, Rx even on newer CPUs, with Rx encoding
+ // a nop type.
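+ // For example, nop(0) assembles to mov r0, r0 (0xE1A00000).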
+ DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
emit(al | 13*B21 | type*B12 | type);
}
+bool Assembler::IsMovT(Instr instr) {
+ instr &= ~(((kNumberOfConditions - 1) << 28) | // mask out condition
+ ((kNumRegisters-1)*B12) | // mask out register
+ EncodeMovwImmediate(0xFFFF)); // mask out immediate value
+ return instr == kMovtPattern;
+}
+
+
+bool Assembler::IsMovW(Instr instr) {
+ instr &= ~(((kNumberOfConditions - 1) << 28) | // mask out condition
+ ((kNumRegisters-1)*B12) | // mask out destination
+ EncodeMovwImmediate(0xFFFF)); // mask out immediate value
+ return instr == kMovwPattern;
+}
+
+
+Instr Assembler::GetMovTPattern() { return kMovtPattern; }
+
+
+Instr Assembler::GetMovWPattern() { return kMovwPattern; }
+
+
+Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
+ DCHECK(immediate < 0x10000);
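+ // The 16-bit value is split across the imm4:imm12 fields of movw/movt;
+ // e.g. 0x1234 becomes 0x10234 within the instruction word.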
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
+ instruction &= ~EncodeMovwImmediate(0xffff);
+ return instruction | EncodeMovwImmediate(immediate);
+}
+
+
+int Assembler::DecodeShiftImm(Instr instr) {
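+ // Reverses the ARM operand-2 immediate encoding: an 8-bit value rotated
+ // right by twice the 4-bit rotate field, e.g. immed8 0x3F with rotate
+ // field 1 decodes to 0xC000000F.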
+ int rotate = Instruction::RotateValue(instr) * 2;
+ int immed8 = Instruction::Immed8Value(instr);
+ return (immed8 >> rotate) | (immed8 << (32 - rotate));
+}
+
+
+Instr Assembler::PatchShiftImm(Instr instr, int immed) {
+ uint32_t rotate_imm = 0;
+ uint32_t immed_8 = 0;
+ bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
+ DCHECK(immed_fits);
+ USE(immed_fits);
+ return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
+ DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
+bool Assembler::IsMovImmed(Instr instr) {
+ return (instr & kMovImmedMask) == kMovImmedPattern;
+}
+
+
+bool Assembler::IsOrrImmed(Instr instr) {
+ return (instr & kOrrImmedMask) == kOrrImmedPattern;
+}
+
+
+// static
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
@@ -2412,6 +3207,11 @@
}
+bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
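+ // Addressing mode 2 (ldr/str) offsets are a 12-bit magnitude plus a sign
+ // bit, so any offset in [-4095, 4095] fits.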
+ return is_uint12(abs(imm32));
+}
+
+
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@@ -2435,14 +3235,19 @@
}
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in
+ // the code.
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+}
+
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
+ if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2*buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1*MB;
@@ -2458,9 +3263,9 @@
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2475,14 +3280,20 @@
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
+ DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
+ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+ DCHECK(rinfo.rmode() == RelocInfo::NONE64);
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ constant_pool_builder_.Relocate(pc_delta);
}
@@ -2490,7 +3301,8 @@
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- ASSERT(num_pending_reloc_info_ == 0);
+ DCHECK(num_pending_32_bit_reloc_info_ == 0);
+ DCHECK(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2501,48 +3313,41 @@
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- ASSERT(num_pending_reloc_info_ == 0);
+ DCHECK(num_pending_32_bit_reloc_info_ == 0);
+ DCHECK(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
}
+void Assembler::emit_code_stub_address(Code* stub) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) =
+ reinterpret_cast<uint32_t>(stub->instruction_start());
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- // We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
- // These modes do not need an entry in the constant pool.
- } else {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- }
- if (rinfo.rmode() != RelocInfo::NONE) {
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
}
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(),
+ rinfo.rmode(),
+ RecordedAstId().ToInt(),
+ NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2552,13 +3357,51 @@
}
+ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
+ const RelocInfo& rinfo) {
+ if (FLAG_enable_ool_constant_pool) {
+ return constant_pool_builder_.AddEntry(this, rinfo);
+ } else {
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ }
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ return ConstantPoolArray::SMALL_SECTION;
+ }
+}
+
+
void Assembler::BlockConstPoolFor(int instructions) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ DCHECK(num_pending_32_bit_reloc_info_ == 0);
+ DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than first_const_pool_use_ + kMaxDistToPool
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ // Max pool start (if we need a jump and an alignment).
+#ifdef DEBUG
+ int start = pc_limit + kInstrSize + 2 * kPointerSize;
+ DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
+ (start - first_const_pool_32_use_ +
+ num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
+ DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
+ (start - first_const_pool_64_use_ < kMaxDistToFPPool));
+#endif
no_const_pool_before_ = pc_limit;
}
@@ -2569,46 +3412,86 @@
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ DCHECK(num_pending_32_bit_reloc_info_ == 0);
+ DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
// Some short instruction sequences must not be broken up by constant pool
// emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
+ DCHECK(!force_emit);
return;
}
// There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
+ if ((num_pending_32_bit_reloc_info_ == 0) &&
+ (num_pending_64_bit_reloc_info_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
}
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kAvgDistToPool or more.
- // * no jump is required and the distance to the first instruction accessing
- // the constant pool is at least kMaxDistToPool / 2.
- ASSERT(first_const_pool_use_ >= 0);
- int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToPool &&
- (require_jump || (dist < (kMaxDistToPool / 2)))) {
- return;
- }
-
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
- int needed_space = jump_instr + kInstrSize +
- num_pending_reloc_info_ * kInstrSize + kGap;
+ int size_up_to_marker = jump_instr + kInstrSize;
+ int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
+ bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
+ bool require_64_bit_align = false;
+ if (has_fp_values) {
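+ // The 64-bit entries must be 8-byte aligned; since pc_ is word aligned,
+ // one padding word restores alignment when needed.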
+ require_64_bit_align = ((((uintptr_t)pc_ + size_up_to_marker) & 0x7) != 0);
+ if (require_64_bit_align) {
+ size_after_marker += kInstrSize;
+ }
+ size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
+ }
+
+ int size = size_up_to_marker + size_after_marker;
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance from the first instruction accessing the constant pool to
+ // any of the constant pool entries will exceed its limit the next
+ // time the pool is checked. This is overly restrictive, but we don't emit
+ // constant pool entries in order, so it's conservatively correct.
+ // * the instruction doesn't require a jump after itself to jump over the
+ // constant pool, and we're getting close to running out of range.
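+ // (The limits reflect the pc-relative load ranges: roughly 4KB for the
+ // 12-bit ldr offset and 1KB for the 8-bit, word-scaled vldr offset.)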
+ if (!force_emit) {
+ DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+ bool need_emit = false;
+ if (has_fp_values) {
+ int dist64 = pc_offset() +
+ size -
+ num_pending_32_bit_reloc_info_ * kPointerSize -
+ first_const_pool_64_use_;
+ if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
+ (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
+ need_emit = true;
+ }
+ }
+ int dist32 = pc_offset() + size - first_const_pool_32_use_;
+ if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+ (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+ need_emit = true;
+ }
+ if (!need_emit) return;
+ }
+
+ int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
{
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(size);
// Emit jump over constant pool if necessary.
Label after_pool;
@@ -2616,37 +3499,109 @@
b(&after_pool);
}
- RecordComment("[ Constant Pool");
+ // Put down the constant pool marker, an "undefined instruction".
+ // Encoding the data size helps the disassembler know what to print.
+ emit(kConstantPoolMarker |
+ EncodeConstantPoolLength(size_after_marker / kPointerSize));
- // Put down constant pool marker "Undefined instruction" as specified by
- // A5.6 (ARMv7) Instruction set encoding.
- emit(kConstantPoolMarker | num_pending_reloc_info_);
-
- // Emit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
-
- Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- ASSERT(IsLdrPcImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0);
-
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
- ASSERT(is_uint12(delta));
-
- instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
- emit(rinfo.data());
+ if (require_64_bit_align) {
+ emit(kConstantPoolMarker);
}
- num_pending_reloc_info_ = 0;
- first_const_pool_use_ = -1;
+ // Emit 64-bit constant pool entries first: their load range is smaller
+ // than that of 32-bit entries.
+ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+
+ DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
+
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
+ DCHECK((IsVldrDPcImmediateOffset(instr) &&
+ GetVldrDRegisterImmediateOffset(instr) == 0));
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ DCHECK(is_uint10(delta));
+
+ bool found = false;
+ uint64_t value = rinfo.raw_data64();
+ for (int j = 0; j < i; j++) {
+ RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
+ if (value == rinfo2.raw_data64()) {
+ found = true;
+ DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
+ Instr instr2 = instr_at(rinfo2.pc());
+ DCHECK(IsVldrDPcImmediateOffset(instr2));
+ delta = GetVldrDRegisterImmediateOffset(instr2);
+ delta += rinfo2.pc() - rinfo.pc();
+ break;
+ }
+ }
+
+ instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
+
+ if (!found) {
+ uint64_t uint_data = rinfo.raw_data64();
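+ // Emit the low word first: the pool stores doublewords little-endian,
+ // matching what vldr expects on V8's little-endian ARM targets.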
+ emit(uint_data & 0xFFFFFFFF);
+ emit(uint_data >> 32);
+ }
+ }
+
+ // Emit 32-bit constant pool entries.
+ for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
+ DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL &&
+ rinfo.rmode() != RelocInfo::NONE64);
+
+ Instr instr = instr_at(rinfo.pc());
+
+ // 64-bit loads shouldn't get here.
+ DCHECK(!IsVldrDPcImmediateOffset(instr));
+
+ if (IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0) {
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ DCHECK(is_uint12(delta));
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+
+ bool found = false;
+ if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
+ for (int j = 0; j < i; j++) {
+ RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
+
+ if ((rinfo2.data() == rinfo.data()) &&
+ (rinfo2.rmode() == rinfo.rmode())) {
+ Instr instr2 = instr_at(rinfo2.pc());
+ if (IsLdrPcImmediateOffset(instr2)) {
+ delta = GetLdrRegisterImmediateOffset(instr2);
+ delta += rinfo2.pc() - rinfo.pc();
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+
+ instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+
+ if (!found) {
+ emit(rinfo.data());
+ }
+ } else {
+ DCHECK(IsMovW(instr));
+ }
+ }
+
+ num_pending_32_bit_reloc_info_ = 0;
+ num_pending_64_bit_reloc_info_ = 0;
+ first_const_pool_32_use_ = -1;
+ first_const_pool_64_use_ = -1;
RecordComment("]");
@@ -2661,6 +3616,229 @@
}
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ if (!FLAG_enable_ool_constant_pool) {
+ return isolate->factory()->empty_constant_pool_array();
+ }
+ return constant_pool_builder_.New(isolate);
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ constant_pool_builder_.Populate(this, constant_pool);
+}
+
+
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
+
+
+bool ConstantPoolBuilder::IsEmpty() {
+ return entries_.size() == 0;
+}
+
+
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+ RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::NONE64) {
+ return ConstantPoolArray::INT64;
+ } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+ return ConstantPoolArray::INT32;
+ } else if (RelocInfo::IsCodeTarget(rmode)) {
+ return ConstantPoolArray::CODE_PTR;
+ } else {
+ DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+ return ConstantPoolArray::HEAP_PTR;
+ }
+}
+
+
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+ Assembler* assm, const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ DCHECK(rmode != RelocInfo::COMMENT &&
+ rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ ConstantPoolArray::LayoutSection entry_section = current_section_;
+ if (RelocInfo::IsNone(rmode) ||
+ (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<ConstantPoolEntry>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+ // Merge with found entry.
+ merged_index = i;
+ entry_section = entries_[i].section_;
+ break;
+ }
+ }
+ }
+ DCHECK(entry_section <= current_section_);
+ entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
+ }
+
+ // Check if we still have room for another entry in the small section
+ // given ARM's ldr and vldr immediate offset ranges.
+ if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
+ !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
+ is_uint10(ConstantPoolArray::MaxInt64Offset(
+ small_entries()->count_of(ConstantPoolArray::INT64))))) {
+ current_section_ = ConstantPoolArray::EXTENDED_SECTION;
+ }
+ return entry_section;
+}
+
+
+void ConstantPoolBuilder::Relocate(int pc_delta) {
+ for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+ entry != entries_.end(); entry++) {
+ DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+ entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
+ }
+}
+
+
+Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
+ if (IsEmpty()) {
+ return isolate->factory()->empty_constant_pool_array();
+ } else if (extended_entries()->is_empty()) {
+ return isolate->factory()->NewConstantPoolArray(*small_entries());
+ } else {
+ DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+ return isolate->factory()->NewExtendedConstantPoolArray(
+ *small_entries(), *extended_entries());
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ DCHECK_EQ(extended_entries()->is_empty(),
+ !constant_pool->is_extended_layout());
+ DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+ constant_pool, ConstantPoolArray::SMALL_SECTION)));
+ if (constant_pool->is_extended_layout()) {
+ DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+ constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+ }
+
+ // Set up initial offsets.
+ int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
+ [ConstantPoolArray::NUMBER_OF_TYPES];
+ for (int section = 0; section <= constant_pool->final_section(); section++) {
+ int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
+ ? small_entries()->total_count()
+ : 0;
+ for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
+ ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
+ if (number_of_entries_[section].count_of(type) != 0) {
+ offsets[section][type] = constant_pool->OffsetOfElementAt(
+ number_of_entries_[section].base_of(type) + section_start);
+ }
+ }
+ }
+
+ for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+ entry != entries_.end(); entry++) {
+ RelocInfo rinfo = entry->rinfo_;
+ RelocInfo::Mode rmode = entry->rinfo_.rmode();
+ ConstantPoolArray::Type type = GetConstantPoolType(rmode);
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (entry->merged_index_ == -1) {
+ offset = offsets[entry->section_][type];
+ offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
+ if (type == ConstantPoolArray::INT64) {
+ constant_pool->set_at_offset(offset, rinfo.data64());
+ } else if (type == ConstantPoolArray::INT32) {
+ constant_pool->set_at_offset(offset,
+ static_cast<int32_t>(rinfo.data()));
+ } else if (type == ConstantPoolArray::CODE_PTR) {
+ constant_pool->set_at_offset(offset,
+ reinterpret_cast<Address>(rinfo.data()));
+ } else {
+ DCHECK(type == ConstantPoolArray::HEAP_PTR);
+ constant_pool->set_at_offset(offset,
+ reinterpret_cast<Object*>(rinfo.data()));
+ }
+ offset -= kHeapObjectTag;
+ entry->merged_index_ = offset; // Stash offset for merged entries.
+ } else {
+ DCHECK(entry->merged_index_ < (entry - entries_.begin()));
+ offset = entries_[entry->merged_index_].merged_index_;
+ }
+
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = assm->instr_at(rinfo.pc());
+ if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Instructions to patch must be 'movw rd, #0' and 'movt rd, #0'.
+ Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+ DCHECK((Assembler::IsMovW(instr) &&
+ Instruction::ImmedMovwMovtValue(instr) == 0));
+ DCHECK((Assembler::IsMovT(next_instr) &&
+ Instruction::ImmedMovwMovtValue(next_instr) == 0));
+ assm->instr_at_put(
+ rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
+ assm->instr_at_put(
+ rinfo.pc() + Assembler::kInstrSize,
+ Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+ } else {
+ // Instructions to patch must be 'mov rd, #0' and 'orr rd, rd, #0'.
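+ // For illustration, offset 0x12345678 would be rebuilt byte by byte as
+ // mov rd, #0x78; orr rd, rd, #0x5600; orr rd, rd, #0x340000;
+ // orr rd, rd, #0x12000000.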
+ Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+ Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
+ Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
+ DCHECK((Assembler::IsMovImmed(instr) &&
+ Instruction::Immed8Value(instr) == 0));
+ DCHECK((Assembler::IsOrrImmed(instr_2) &&
+ Instruction::Immed8Value(instr_2) == 0) &&
+ Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
+ DCHECK((Assembler::IsOrrImmed(instr_3) &&
+ Instruction::Immed8Value(instr_3) == 0) &&
+ Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
+ DCHECK((Assembler::IsOrrImmed(instr_4) &&
+ Instruction::Immed8Value(instr_4) == 0) &&
+ Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
+ assm->instr_at_put(
+ rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
+ assm->instr_at_put(
+ rinfo.pc() + Assembler::kInstrSize,
+ Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
+ assm->instr_at_put(
+ rinfo.pc() + 2 * Assembler::kInstrSize,
+ Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
+ assm->instr_at_put(
+ rinfo.pc() + 3 * Assembler::kInstrSize,
+ Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
+ }
+ } else if (type == ConstantPoolArray::INT64) {
+ // Instruction to patch must be 'vldr rd, [pp, #0]'.
+ DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
+ Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+ DCHECK(is_uint10(offset));
+ assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
+ instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
+ Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+ DCHECK(is_uint12(offset));
+ assm->instr_at_put(
+ rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ }
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e2d5f59..108d5cb 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -39,10 +39,13 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
+
#include <stdio.h>
-#include "assembler.h"
-#include "constants-arm.h"
-#include "serialize.h"
+#include <vector>
+
+#include "src/arm/constants-arm.h"
+#include "src/assembler.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -68,62 +71,6 @@
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
-// Core register
-struct Register {
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 8;
- static const int kSizeInBytes = 4;
-
- static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kNumAllocatableRegisters);
- return reg.code();
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- void set_code(int code) {
- code_ = code;
- ASSERT(is_valid());
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
// These constants are used in several locations, including static initializers
const int kRegister_no_reg_Code = -1;
const int kRegister_r0_Code = 0;
@@ -143,6 +90,69 @@
const int kRegister_lr_Code = 14;
const int kRegister_pc_Code = 15;
+// Core register
+struct Register {
+ static const int kNumRegisters = 16;
+ static const int kMaxNumAllocatableRegisters =
+ FLAG_enable_ool_constant_pool ? 8 : 9;
+ static const int kSizeInBytes = 4;
+
+ inline static int NumAllocatableRegisters();
+
+ static int ToAllocationIndex(Register reg) {
+ DCHECK(reg.code() < kMaxNumAllocatableRegisters);
+ return reg.code();
+ }
+
+ static Register FromAllocationIndex(int index) {
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return from_code(index);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ };
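+ // With the out-of-line constant pool enabled, r7 is reserved as the
+ // constant pool pointer, so indices from 7 upwards skip it: index 7
+ // names "r8".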
+ if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ return names[index + 1];
+ }
+ return names[index];
+ }
+
+ static Register from_code(int code) {
+ Register r = { code };
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ DCHECK(is_valid());
+ return code_;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << code_;
+ }
+
+ void set_code(int code) {
+ code_ = code;
+ DCHECK(is_valid());
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
const Register no_reg = { kRegister_no_reg_Code };
const Register r0 = { kRegister_r0_Code };
@@ -152,6 +162,7 @@
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
+// Used as the constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };
@@ -165,21 +176,21 @@
const Register lr = { kRegister_lr_Code };
const Register pc = { kRegister_pc_Code };
-
// Single word VFP register.
struct SwVfpRegister {
+ static const int kSizeInBytes = 4;
bool is_valid() const { return 0 <= code_ && code_ < 32; }
bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
return code_;
}
int bit() const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
return 1 << code_;
}
void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
*m = code_ & 0x1;
*vm = code_ >> 1;
}
@@ -190,75 +201,46 @@
// Double word VFP register.
struct DwVfpRegister {
- static const int kNumRegisters = 16;
+ static const int kMaxNumRegisters = 32;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
+ static const int kSizeInBytes = 8;
+
+ // Note: the number of registers can be different at snapshot and run-time.
+ // Any code included in the snapshot must be able to run with either 16 or
+ // 32 registers.
+ inline static int NumRegisters();
+ inline static int NumReservedRegisters();
+ inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
-
- static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "d0",
- "d1",
- "d2",
- "d3",
- "d4",
- "d5",
- "d6",
- "d7",
- "d8",
- "d9",
- "d10",
- "d11",
- "d12",
- "d13"
- };
- return names[index];
- }
+ static const char* AllocationIndexToString(int index);
+ inline static DwVfpRegister FromAllocationIndex(int index);
static DwVfpRegister from_code(int code) {
DwVfpRegister r = { code };
return r;
}
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumRegisters;
+ }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
- SwVfpRegister reg;
- reg.code_ = code_ * 2;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- SwVfpRegister high() const {
- SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
-
- ASSERT(reg.is_valid());
- return reg;
- }
int code() const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
return code_;
}
int bit() const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
return 1 << code_;
}
void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
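+ // Splits the register code into the M:Vm instruction fields;
+ // e.g. d17 (code 17) gives M = 1, Vm = 1.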
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
}
@@ -270,6 +252,78 @@
typedef DwVfpRegister DoubleRegister;
+// Double word VFP register d0-15.
+struct LowDwVfpRegister {
+ public:
+ static const int kMaxNumLowRegisters = 16;
+ operator DwVfpRegister() const {
+ DwVfpRegister r = { code_ };
+ return r;
+ }
+ static LowDwVfpRegister from_code(int code) {
+ LowDwVfpRegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumLowRegisters;
+ }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ DCHECK(is_valid());
+ return code_;
+ }
+ SwVfpRegister low() const {
+ SwVfpRegister reg;
+ reg.code_ = code_ * 2;
+
+ DCHECK(reg.is_valid());
+ return reg;
+ }
+ SwVfpRegister high() const {
+ SwVfpRegister reg;
+ reg.code_ = (code_ * 2) + 1;
+
+ DCHECK(reg.is_valid());
+ return reg;
+ }
+
+ int code_;
+};
+
+
+// Quad word NEON register.
+struct QwNeonRegister {
+ static const int kMaxNumRegisters = 16;
+
+ static QwNeonRegister from_code(int code) {
+ QwNeonRegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const {
+ return (0 <= code_) && (code_ < kMaxNumRegisters);
+ }
+ bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ DCHECK(is_valid());
+ return code_;
+ }
+ void split_code(int* vm, int* m) const {
+ DCHECK(is_valid());
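+ // A Q register maps onto a pair of D registers; e.g. q9 (d18/d19)
+ // encodes as M = 1, Vm = 2.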
+ int encoded_code = code_ << 1;
+ *m = (encoded_code & 0x10) >> 4;
+ *vm = encoded_code & 0x0F;
+ }
+
+ int code_;
+};
+
+
+typedef QwNeonRegister QuadRegister;
+
+
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
const SwVfpRegister s0 = { 0 };
@@ -306,22 +360,56 @@
const SwVfpRegister s31 = { 31 };
const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
+const LowDwVfpRegister d0 = { 0 };
+const LowDwVfpRegister d1 = { 1 };
+const LowDwVfpRegister d2 = { 2 };
+const LowDwVfpRegister d3 = { 3 };
+const LowDwVfpRegister d4 = { 4 };
+const LowDwVfpRegister d5 = { 5 };
+const LowDwVfpRegister d6 = { 6 };
+const LowDwVfpRegister d7 = { 7 };
+const LowDwVfpRegister d8 = { 8 };
+const LowDwVfpRegister d9 = { 9 };
+const LowDwVfpRegister d10 = { 10 };
+const LowDwVfpRegister d11 = { 11 };
+const LowDwVfpRegister d12 = { 12 };
+const LowDwVfpRegister d13 = { 13 };
+const LowDwVfpRegister d14 = { 14 };
+const LowDwVfpRegister d15 = { 15 };
+const DwVfpRegister d16 = { 16 };
+const DwVfpRegister d17 = { 17 };
+const DwVfpRegister d18 = { 18 };
+const DwVfpRegister d19 = { 19 };
+const DwVfpRegister d20 = { 20 };
+const DwVfpRegister d21 = { 21 };
+const DwVfpRegister d22 = { 22 };
+const DwVfpRegister d23 = { 23 };
+const DwVfpRegister d24 = { 24 };
+const DwVfpRegister d25 = { 25 };
+const DwVfpRegister d26 = { 26 };
+const DwVfpRegister d27 = { 27 };
+const DwVfpRegister d28 = { 28 };
+const DwVfpRegister d29 = { 29 };
+const DwVfpRegister d30 = { 30 };
+const DwVfpRegister d31 = { 31 };
+
+const QwNeonRegister q0 = { 0 };
+const QwNeonRegister q1 = { 1 };
+const QwNeonRegister q2 = { 2 };
+const QwNeonRegister q3 = { 3 };
+const QwNeonRegister q4 = { 4 };
+const QwNeonRegister q5 = { 5 };
+const QwNeonRegister q6 = { 6 };
+const QwNeonRegister q7 = { 7 };
+const QwNeonRegister q8 = { 8 };
+const QwNeonRegister q9 = { 9 };
+const QwNeonRegister q10 = { 10 };
+const QwNeonRegister q11 = { 11 };
+const QwNeonRegister q12 = { 12 };
+const QwNeonRegister q13 = { 13 };
+const QwNeonRegister q14 = { 14 };
+const QwNeonRegister q15 = { 15 };
+
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
@@ -337,11 +425,11 @@
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(CRegister creg) const { return code_ == creg.code_; }
int code() const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
return code_;
}
int bit() const {
- ASSERT(is_valid());
+ DCHECK(is_valid());
return 1 << code_;
}
@@ -399,7 +487,7 @@
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
+ RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero()) {
return Operand(static_cast<int32_t>(0));
}
@@ -412,6 +500,17 @@
// rm <shift_op> shift_imm
explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+ INLINE(static Operand SmiUntag(Register rm)) {
+ return Operand(rm, ASR, kSmiTagSize);
+ }
+ INLINE(static Operand PointerOffsetFromSmiKey(Register key)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
+ }
+ INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
+ return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
+ }
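+ // With the usual 32-bit Smi layout (kSmiTag == 0, kSmiTagSize == 1),
+ // PointerOffsetFromSmiKey(key) is simply 'key, LSL #1': a Smi already
+ // holds its value shifted left by the tag size.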
// rm <shift_op> rs
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
@@ -419,16 +518,22 @@
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- // Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary. If
+ // Return the number of actual instructions required to implement the given
+ // instruction for this particular operand. This can be a single instruction,
+ // if no load into the ip register is necessary, or anything between 2 and 4
+ // instructions when we need to load from the constant pool (depending upon
+ // whether the constant pool entry is in the small or extended section). If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(Instr instr = 0) const;
- bool must_use_constant_pool() const;
+ //
+ // The value returned is only valid as long as no entries are added to the
+ // constant pool between this call and the actual instruction being emitted.
+ int instructions_required(const Assembler* assembler, Instr instr = 0) const;
+ bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
- ASSERT(!rm_.is_valid());
+ DCHECK(!rm_.is_valid());
return imm32_;
}
@@ -468,14 +573,20 @@
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+ INLINE(static MemOperand PointerAddressFromSmiKey(Register array,
+ Register key,
+ AddrMode am = Offset)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
+ }
void set_offset(int32_t offset) {
- ASSERT(rm_.is(no_reg));
+ DCHECK(rm_.is(no_reg));
offset_ = offset;
}
uint32_t offset() const {
- ASSERT(rm_.is(no_reg));
+ DCHECK(rm_.is(no_reg));
return offset_;
}
@@ -498,131 +609,94 @@
friend class Assembler;
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
+
+// Class NeonMemOperand represents a memory operand in load and
+// store NEON instructions.
+class NeonMemOperand BASE_EMBEDDED {
public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
+ // [rn {:align}] Offset
+ // [rn {:align}]! PostIndex
+ explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- return (supported_ & (1u << f)) != 0;
- }
+ // [rn {:align}], rm PostIndex
+ explicit NeonMemOperand(Register rn, Register rm, int align = 0);
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
+ Register rn() const { return rn_; }
+ Register rm() const { return rm_; }
+ int align() const { return align_; }
private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
+ void SetAlignment(int align);
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+ Register rn_; // base
+ Register rm_; // register increment
+ int align_;
};
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-extern const Instr kBlxRegMask;
-extern const Instr kBlxRegPattern;
-extern const Instr kBlxIp;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
+// Class NeonListOperand represents a list of NEON registers.
+class NeonListOperand BASE_EMBEDDED {
+ public:
+ explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
+ DoubleRegister base() const { return base_; }
+ NeonListType type() const { return type_; }
+ private:
+ DoubleRegister base_;
+ NeonListType type_;
+};
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ ConstantPoolBuilder();
+ ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
+ const RelocInfo& rinfo);
+ void Relocate(int pc_delta);
+ bool IsEmpty();
+ Handle<ConstantPoolArray> New(Isolate* isolate);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline ConstantPoolArray::LayoutSection current_section() const {
+ return current_section_;
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* number_of_entries(
+ ConstantPoolArray::LayoutSection section) {
+ return &number_of_entries_[section];
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* small_entries() {
+ return number_of_entries(ConstantPoolArray::SMALL_SECTION);
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* extended_entries() {
+ return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
+ }
+
+ private:
+ struct ConstantPoolEntry {
+ ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
+ int merged_index)
+ : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
+
+ RelocInfo rinfo_;
+ ConstantPoolArray::LayoutSection section_;
+ int merged_index_;
+ };
+
+ ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
+
+ std::vector<ConstantPoolEntry> entries_;
+ ConstantPoolArray::LayoutSection current_section_;
+ ConstantPoolArray::NumberOfEntries number_of_entries_[2];
+};
+
+struct VmovIndex {
+ unsigned char index;
+};
+const VmovIndex VmovIndexLo = { 0 };
+const VmovIndex VmovIndexHi = { 1 };
class Assembler : public AssemblerBase {
public:
@@ -640,10 +714,7 @@
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ virtual ~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -672,27 +743,51 @@
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
+ // Returns true if the given pc address is the start of a constant pool load
+ // instruction sequence.
+ INLINE(static bool is_constant_pool_load(Address pc));
// Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- INLINE(static Address target_address_address_at(Address pc));
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address constant_pool_entry_address(
+ Address pc, ConstantPoolArray* constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool));
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
+
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ INLINE(static Address target_address_from_return_address(Address pc));
+
+ // Given the address of the beginning of a call, return the address
+ // in the instruction stream that the call will return from.
+ INLINE(static Address return_address_from_call_start(Address pc));
+
+ // Return the code target address of the patch debug break slot
+ INLINE(static Address break_address_from_return_address(Address pc));
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target);
+ Address constant_pool_entry, Code* code, Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
@@ -702,49 +797,21 @@
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-#ifdef USE_BLX
- // Call sequence is:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- static const int kCallTargetAddressOffset = 2 * kInstrSize;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- static const int kCallTargetAddressOffset = kInstrSize;
-#endif
-
// Distance between start of patched return sequence and the emitted address
// to jump to.
-#ifdef USE_BLX
// Patched return sequence is:
// ldr ip, [pc, #0] @ emited address and start
// blx ip
static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-#else
- // Patched return sequence is:
- // mov lr, pc @ start of sequence
- // ldr pc, [pc, #-4] @ emited address
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-#endif
// Distance between start of patched debug break slot and the emitted address
// to jump to.
-#ifdef USE_BLX
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emited address and start
// blx ip
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-#else
- // Patched debug break slot code is:
- // mov lr, pc @ start of sequence
- // ldr pc, [pc, #-4] @ emited address
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
-#endif
+
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
@@ -843,11 +910,13 @@
mov(dst, Operand(src), s, cond);
}
+ // Load into a register the position of the label relative to the
+ // generated code object pointer.
+ void mov_label_offset(Register dst, Label* label);
+
// ARMv7 instructions for loading a 32 bit immediate in two instructions.
- // This may actually emit a different mov instruction, but on an ARMv7 it
- // is guaranteed to only emit one instruction.
+ // The constant for movw and movt should be in the range 0-0xffff.
void movw(Register reg, uint32_t immediate, Condition cond = al);
- // The constant for movt should be in the range 0-0xffff.
void movt(Register reg, uint32_t immediate, Condition cond = al);
void bic(Register dst, Register src1, const Operand& src2,
@@ -856,11 +925,48 @@
void mvn(Register dst, const Operand& src,
SBit s = LeaveCC, Condition cond = al);
+ // Shift instructions
+
+ void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al) {
+ if (src2.is_reg()) {
+ mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
+ } else {
+ mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
+ }
+ }
+
+ void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al) {
+ if (src2.is_reg()) {
+ mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
+ } else {
+ mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
+ }
+ }
+
+ void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al) {
+ if (src2.is_reg()) {
+ mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
+ } else {
+ mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
+ }
+ }
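+
+ // These shift helpers are aliases for mov with a shifted operand; e.g.
+ // lsl(r0, r1, Operand(2)) emits 'mov r0, r1, lsl #2'.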
+
// Multiply instructions
void mla(Register dst, Register src1, Register src2, Register srcA,
SBit s = LeaveCC, Condition cond = al);
+ void mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond = al);
+
+ void sdiv(Register dst, Register src1, Register src2,
+ Condition cond = al);
+
+ void udiv(Register dst, Register src1, Register src2, Condition cond = al);
+
void mul(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
@@ -912,6 +1018,19 @@
void bfi(Register dst, Register src, int lsb, int width,
Condition cond = al);
+ void pkhbt(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void pkhtb(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void uxtb(Register dst, const Operand& src, Condition cond = al);
+
+ void uxtab(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void uxtb16(Register dst, const Operand& src, Condition cond = al);
+
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
@@ -933,6 +1052,9 @@
Register src2,
const MemOperand& dst, Condition cond = al);
+ // Preload instructions
+ void pld(const MemOperand& address);
+
// Load/Store multiple instructions
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
@@ -982,10 +1104,7 @@
LFlag l = Short); // v5 and above
// Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e, D16 to D31.
- // However, some simple modifications can allow
- // these APIs to support D16 to D31.
+ // All these APIs support S0 to S31 and D0 to D31.
void vldr(const DwVfpRegister dst,
const Register base,
@@ -1045,7 +1164,7 @@
void vmov(const DwVfpRegister dst,
double imm,
- const Condition cond = al);
+ const Register scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
@@ -1053,6 +1172,14 @@
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
+ const VmovIndex index,
+ const Register src,
+ const Condition cond = al);
+ void vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond = al);
@@ -1094,6 +1221,9 @@
const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
+ void vcvt_f64_s32(const DwVfpRegister dst,
+ int fraction_bits,
+ const Condition cond = al);
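// vcvt_f64_s32 converts a 32-bit fixed-point value (with the given number
// of fraction bits, per the ARM VCVT fixed-point form) into a double. A
// host-side model of the arithmetic, as an illustrative sketch only:
//
//   #include <cmath>
//   #include <stdint.h>
//
//   double ModelVcvtF64S32(int32_t raw, int fraction_bits) {
//     // result = raw / 2^fraction_bits; e.g. raw 0x00010000 with 16
//     // fraction bits yields exactly 1.0.
//     return std::ldexp(static_cast<double>(raw), -fraction_bits);
//   }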
void vneg(const DwVfpRegister dst,
const DwVfpRegister src,
@@ -1113,6 +1243,14 @@
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
+ void vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmls(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -1131,6 +1269,17 @@
const DwVfpRegister src,
const Condition cond = al);
+ // Support for NEON.
+ // All these APIs support D0 to D31 and Q0 to Q15.
+
+ void vld1(NeonSize size,
+ const NeonListOperand& dst,
+ const NeonMemOperand& src);
+ void vst1(NeonSize size,
+ const NeonListOperand& src,
+ const NeonMemOperand& dst);
+ void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
+
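// A usage sketch for the NEON helpers above, assuming an Assembler `assm`
// and a source address in r0 (constructor shapes hedged from this header):
// load eight bytes, then widen them into unsigned 16-bit lanes.
//   assm.vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r0));
//   assm.vmovl(NeonU8, q0, d0);  // q0.u16[i] = zero-extended d0.u8[i]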
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1175,7 +1324,10 @@
}
// Check whether an immediate fits an addressing mode 1 instruction.
- bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+ static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+ // Check whether an immediate fits an addressing mode 2 instruction.
+ bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
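// For reference: an addressing-mode-1 immediate is an 8-bit value rotated
// right by an even amount, while a mode-2 immediate is a plain 12-bit
// offset. A standalone sketch of the mode-1 check (illustrative, not the
// V8 implementation):
//
//   #include <stdint.h>
//
//   static bool FitsAddrMode1(uint32_t imm) {
//     // Encodable iff some even left-rotation of imm fits in 8 bits,
//     // undoing the encoder's rotate-right.
//     for (int rot = 0; rot < 32; rot += 2) {
//       uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
//       if (v <= 0xFF) return true;
//     }
//     return false;
//   }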
// Class for scoping postponing the constant pool generation.
class BlockConstPoolScope {
@@ -1203,22 +1355,41 @@
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void SetRecordedAstId(unsigned ast_id) {
- ASSERT(recorded_ast_id_ == kNoASTId);
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ DCHECK(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
- unsigned RecordedAstId() {
- ASSERT(recorded_ast_id_ != kNoASTId);
+ TypeFeedbackId RecordedAstId() {
+ DCHECK(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
- void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ // Record the emission of a constant pool.
+ //
+ // The emission of the constant pool depends on the size of the generated
+ // code and the number of RelocInfo entries recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (DebugCodegen::GenerateSlot()). This may affect the emission of the
+ // constant pools and cause the version of the code with debugger support to
+ // have constant pools generated in different places.
+ // Recording the position and size of emitted constant pools makes it
+ // possible to compute the offset mappings between the different versions
+ // of a function correctly in all situations.
+ //
+ // The parameter indicates the size of the constant pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
@@ -1226,7 +1397,8 @@
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return pc_ - buffer_; }
+ // Emits the address of the code stub's first instruction.
+ void emit_code_stub_address(Code* stub);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1243,8 +1415,17 @@
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
+ static bool IsVldrDRegisterImmediate(Instr instr);
+ static Instr GetConsantPoolLoadPattern();
+ static Instr GetConsantPoolLoadMask();
+ static bool IsLdrPpRegOffset(Instr instr);
+ static Instr GetLdrPpRegOffsetPattern();
+ static bool IsLdrPpImmediateOffset(Instr instr);
+ static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
+ static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+ static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
static bool IsStrRegisterImmediate(Instr instr);
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
static bool IsAddRegisterImmediate(Instr instr);
@@ -1259,18 +1440,35 @@
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
+ static bool IsVldrDPcImmediateOffset(Instr instr);
+ static bool IsBlxReg(Instr instr);
+ static bool IsBlxIp(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+ static bool IsMovImmed(Instr instr);
+ static bool IsOrrImmed(Instr instr);
+ static bool IsMovT(Instr instr);
+ static Instr GetMovTPattern();
+ static bool IsMovW(Instr instr);
+ static Instr GetMovWPattern();
+ static Instr EncodeMovwImmediate(uint32_t immediate);
+ static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
+ static int DecodeShiftImm(Instr instr);
+ static Instr PatchShiftImm(Instr instr, int immed);
// Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToPool = 4*KB;
- static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+ // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+ // PC-relative loads, thereby defining a maximum distance between the
+ // instruction and the accessed constant.
+ static const int kMaxDistToIntPool = 4*KB;
+ static const int kMaxDistToFPPool = 1*KB;
+ // All relocations could be integer ones, so the integer-pool distance
+ // acts as the limit.
+ static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
+ static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
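// Worked out with kInstrSize == 4 on ARM, the buffer capacities above are:
//   kMaxNumPending32RelocInfo = 4 * 1024 / 4 = 1024 entries
//   kMaxNumPending64RelocInfo = 1 * 1024 / 4 = 256 entries
// i.e. one pending entry per instruction over the maximum pool distance.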
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1279,13 +1477,25 @@
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ bool is_constant_pool_available() const { return constant_pool_available_; }
+
+ bool use_extended_constant_pool() const {
+ return constant_pool_builder_.current_section() ==
+ ConstantPoolArray::EXTENDED_SECTION;
+ }
+
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned recorded_ast_id_;
-
- bool emit_debug_code() const { return emit_debug_code_; }
+ TypeFeedbackId recorded_ast_id_;
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1310,9 +1520,16 @@
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
+#ifdef DEBUG
+ // Max pool start (if we need a jump and an alignment).
+ int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
+ (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
+ (first_const_pool_32_use_ + kMaxDistToIntPool)));
+ DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
+ (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
+#endif
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@@ -1327,14 +1544,11 @@
(pc_offset() < no_const_pool_before_);
}
- private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
+ void set_constant_pool_available(bool available) {
+ constant_pool_available_ = available;
+ }
+ private:
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@@ -1343,7 +1557,6 @@
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
- byte* pc_; // the program counter; moves forward
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@@ -1363,20 +1576,14 @@
static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Average distance beetween a constant pool and the first instruction
- // accessing the constant pool. Longer distance should result in less I-cache
- // pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
-
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
- int first_const_pool_use_;
+ int first_const_pool_32_use_;
+ int first_const_pool_64_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
@@ -1390,19 +1597,33 @@
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
+ // The buffers of pending relocation info.
+ RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
+ RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
+ // Number of pending reloc info entries in the 32-bit buffer.
+ int num_pending_32_bit_reloc_info_;
+ // Number of pending reloc info entries in the 64-bit buffer.
+ int num_pending_64_bit_reloc_info_;
+
+ ConstantPoolBuilder constant_pool_builder_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Indicates whether the constant pool can be accessed, which is only possible
+ // if the pp register points to the current code object's constant pool.
+ bool constant_pool_available_;
+
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
+ // 32-bit immediate values
+ void move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond = al);
+
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
void addrmod2(Instr instr, Register rd, const MemOperand& x);
@@ -1413,19 +1634,25 @@
// Labels
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
void next(Label* L);
+ enum UseConstantPoolMode {
+ USE_CONSTANT_POOL,
+ DONT_USE_CONSTANT_POOL
+ };
+
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+ ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
- friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
+ friend class FrameAndConstantPoolScope;
+ friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
- bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index c99e778..9d1a72a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -62,7 +39,7 @@
num_extra_args = 1;
__ push(r1);
} else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects r0 to contain the number of arguments
@@ -75,12 +52,13 @@
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the global context.
+ // Load the native context.
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the InternalArray function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(
@@ -90,384 +68,19 @@
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
+ // Load the native context.
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand(0, RelocInfo::NONE));
- __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ add(scratch1, result, Operand(JSArray::kSize));
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- }
- } else {
- Label loop, entry;
- __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ b(&entry);
- __ bind(&loop);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(scratch1, scratch2);
- __ b(lt, &loop);
- }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ tst(array_size, array_size);
- __ Assert(ne, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ mov(elements_array_end,
- Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end,
- elements_array_end,
- Operand(array_size, ASR, kSmiTagSize));
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ add(elements_array_storage, result, Operand(JSArray::kSize));
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ sub(elements_array_storage,
- elements_array_storage,
- Operand(kHeapObjectTag));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(array_size,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(elements_array_end,
- elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ jmp(&entry);
- __ bind(&loop);
- __ str(scratch1,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(elements_array_storage, elements_array_end);
- __ b(lt, &loop);
- }
-}
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// r0: argc
-// r1: constructor (built-in Array function)
-// lr: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(ne, &argc_one_or_more);
-
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Set up return value, remove receiver from stack and return.
- __ mov(r0, r2);
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(r0, Operand(1));
- __ b(ne, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
- __ tst(r2, r2);
- __ b(ne, ¬_empty_array);
- __ Drop(1); // Adjust stack.
- __ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
- __ b(&empty_array);
-
- __ bind(¬_empty_array);
- __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
- __ b(ne, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
- __ b(ge, call_generic_code);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(r0, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Jump(lr);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convet argc to a smi.
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store therefore PreIndex is
- // used when filling the backing store.
- // r0: argc
- // r3: JSArray
- // r4: elements_array storage start (untagged)
- // r5: elements_array_end (untagged)
- // sp[0]: last argument
- Label loop, entry;
- __ mov(r7, sp);
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r2, &has_non_smi_element);
- }
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(r4, r5);
- __ b(lt, &loop);
-
- __ bind(&finish);
- __ mov(sp, r7);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // r0: argc
- // r3: JSArray
- // sp[0]: receiver
- __ add(sp, sp, Operand(kPointerSize));
- __ mov(r0, r3);
- __ Jump(lr);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- r2, r9, Heap::kHeapNumberMapRootIndex, ¬_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(r3, r4);
- __ b(call_generic_code);
-
- __ bind(¬_double);
- // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
- // r3: JSArray
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- r2,
- r9,
- &cant_transition_map);
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ RecordWriteField(r3,
- HeapObject::kMapOffset,
- r2,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ sub(r7, r7, Operand(kPointerSize));
- __ bind(&loop2);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ cmp(r4, r5);
- __ b(lt, &loop2);
- __ b(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -482,23 +95,17 @@
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function");
+ __ SmiTst(r2);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for InternalArray function");
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -516,54 +123,17 @@
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
+ __ SmiTst(r2);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // tail call a stub
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -582,12 +152,12 @@
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
__ cmp(function, Operand(r2));
- __ Assert(eq, "Unexpected String function");
+ __ Assert(eq, kUnexpectedStringFunction);
}
// Load the first arguments in r0 and get rid of the rest.
Label no_arguments;
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &no_arguments);
// First args = sp[(argc - 1) * 4].
__ sub(r0, r0, Operand(1));
@@ -597,15 +167,12 @@
Register argument = r2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- false, // Is it a Smi?
- ¬_cached);
+ __ LookupNumberStringCache(r0, // Input.
+ argument, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ r5, // Scratch.
+ ¬_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
@@ -616,12 +183,12 @@
// -----------------------------------
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ r0, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
// Initialising the String Object.
Register map = r3;
@@ -629,10 +196,10 @@
if (FLAG_debug_code) {
__ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, "Unexpected string wrapper instance size");
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand(0, RelocInfo::NONE));
- __ Assert(eq, "Unexpected unused properties of string wrapper");
+ __ cmp(r4, Operand::Zero());
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -668,7 +235,7 @@
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -679,7 +246,7 @@
// Load the empty string into r2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ b(&argument_is_string);
@@ -688,7 +255,7 @@
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -696,27 +263,80 @@
}
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r0);
+}
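// The add above forms the raw entry point from a tagged Code pointer. A
// host-side model of that arithmetic (a sketch; kHeapObjectTag is 1 in
// V8's pointer-tagging scheme):
//   entry = tagged_code + Code::kHeaderSize - kHeapObjectTag;
// i.e. strip the heap-object tag, then skip the Code header to reach the
// first instruction.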
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : allocation site or undefined
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ DCHECK(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(r2, r3);
+ __ push(r2);
+ }
// Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0);
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
@@ -725,14 +345,12 @@
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
__ b(ne, &rt_call);
-#endif
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
@@ -749,21 +367,23 @@
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ ldr(r4, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(r3, r4);
+ __ cmp(r3, Operand(JSFunction::kNoSlackTracking));
+ __ b(eq, &allocate);
// Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift));
+ __ str(r4, bit_field3);
+ __ cmp(r3, Operand(JSFunction::kFinishSlackTracking));
__ b(ne, &allocate);
- __ Push(r1, r2);
+ __ push(r1);
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
+ __ Push(r2, r1); // r1 = constructor
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(r2);
@@ -776,47 +396,82 @@
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ if (create_memento) {
+ __ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
+ __ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size
+ // r3: object size (not including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+ DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
- // r3: object size (in words)
+ // r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
+ DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
+ __ DecodeField<Map::ConstructionCount>(ip);
+ __ cmp(ip, Operand(JSFunction::kNoSlackTracking));
+ __ b(eq, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmp(r0, r6);
- __ Assert(le, "Unexpected number of pre-allocated property fields.");
+ __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ cmp(r0, ip);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
- __ InitializeFieldsWithFiller(r5, r0, r7);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(r5, r6, r7);
+
+ if (create_memento) {
+ __ sub(ip, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ add(r0, r4, Operand(ip, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+ // Fill in memento fields.
+ // r5: points to the allocated but uninitialized memento.
+ __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
+ DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ } else {
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+ }
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -842,7 +497,7 @@
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
+ __ Assert(pl, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
@@ -851,7 +506,7 @@
// r4: JSObject
// r5: start of next object
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
+ __ Allocate(
r0,
r5,
r6,
@@ -866,10 +521,10 @@
// r5: FixedArray (not tagged)
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ SmiTag(r0, r3);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
@@ -879,18 +534,12 @@
// r4: JSObject
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
@@ -920,13 +569,47 @@
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ push(r2);
+ }
+
__ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 1);
+ }
__ mov(r4, r0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't increment the create
+ // count ourselves.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
+
+ if (create_memento) {
+ __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, r5);
+ __ b(eq, &count_incremented);
+ // r2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ ldr(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
+ __ str(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
__ push(r4);
__ push(r4);
@@ -942,7 +625,7 @@
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Set up number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+ __ SmiUntag(r0, r3);
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
@@ -969,17 +652,14 @@
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -1004,7 +684,7 @@
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@@ -1031,13 +711,8 @@
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -1054,10 +729,11 @@
// r2: receiver
// r3: argc
// r4: argv
- // r5-r7, cp may be clobbered
+ // r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand(0, RelocInfo::NONE));
+ __ mov(cp, Operand::Zero());
// Enter an internal frame.
{
@@ -1093,7 +769,9 @@
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
+ if (!FLAG_enable_ool_constant_pool) {
+ __ mov(r8, Operand(r4));
+ }
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -1101,12 +779,13 @@
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ // No type feedback cell is available
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the JS frame and remove the parameters (except function), and
// return.
@@ -1128,68 +807,139 @@
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection, which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - isolate
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(2, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ mov(pc, r0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection, which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - isolate
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(2, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ PushFixedFrame(r1);
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Jump to point after the code-age stub.
+ __ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
+ __ mov(pc, r0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
- // Do a tail-call of the compiled function.
- __ Jump(r2);
+ __ add(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ mov(pc, lr); // Jump to miss handler
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
@@ -1223,58 +973,74 @@
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(VFP3)) {
- __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
+ // Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
+ // If the code object is null, just return to the unoptimized code.
Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
+ __ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ }
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
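+ // Worked out: r1 holds <osr_offset> as a smi, so Operand::SmiUntag(r1)
+ // supplies the raw byte offset; the second add then skips the Code header
+ // and clears the heap-object tag (kHeapObjectTag == 1), leaving lr at the
+ // untagged OSR entry address inside the code object.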
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+ }
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be
+ // needed.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
}
@@ -1282,7 +1048,7 @@
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{ Label done;
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(ne, &done);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
@@ -1303,8 +1069,8 @@
// r0: actual number of arguments
// r1: function
Label shift_arguments;
- __ mov(r4, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
+ __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
+ { Label convert_to_object, use_global_proxy, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1319,7 +1085,7 @@
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@@ -1329,10 +1095,10 @@
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
+ __ b(eq, &use_global_proxy);
__ LoadRoot(r3, Heap::kNullValueRootIndex);
__ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
+ __ b(eq, &use_global_proxy);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
@@ -1342,8 +1108,8 @@
{
// Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r0);
__ push(r0);
__ push(r2);
@@ -1351,25 +1117,19 @@
__ mov(r2, r0);
__ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r0);
// Exit the internal frame.
}
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ mov(r4, Operand(0, RelocInfo::NONE));
+ __ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ bind(&use_global_proxy);
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
__ bind(&patch_receiver);
__ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -1380,11 +1140,11 @@
// 3b. Check for function proxy.
__ bind(&slow);
- __ mov(r4, Operand(1, RelocInfo::NONE)); // indicate function proxy
+ __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &shift_arguments);
__ bind(&non_function);
- __ mov(r4, Operand(2, RelocInfo::NONE)); // indicate non-function
+ __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1428,19 +1188,18 @@
__ tst(r4, r4);
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ mov(r2, Operand::Zero());
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
__ push(r1); // re-add proxy object as additional argument
__ add(r0, r0, Operand(1));
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1454,29 +1213,29 @@
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ SmiUntag(r2);
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET,
ne);
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(r3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1493,20 +1252,19 @@
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ Push(r1, r0);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
__ bind(&okay);
__ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Get the receiver.
@@ -1525,7 +1283,7 @@
// Compute the receiver.
// Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
+ Label call_to_object, use_global_proxy;
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
@@ -1535,14 +1293,14 @@
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
+ __ b(eq, &use_global_proxy);
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
+ __ b(eq, &use_global_proxy);
// Check if the receiver is already a JavaScript object.
// r0: receiver
@@ -1557,14 +1315,9 @@
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ bind(&use_global_proxy);
+ __ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalProxyOffset));
// Push the receiver.
// r0: receiver
@@ -1581,8 +1334,7 @@
// r0: current argument index
__ bind(&loop);
__ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ __ Push(r1, r0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
@@ -1600,27 +1352,25 @@
__ cmp(r0, r1);
__ b(ne, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ mov(r2, Operand::Zero());
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1631,11 +1381,34 @@
}
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- r0 : actual number of arguments
+ // -- r1 : function (passed through to callee)
+ // -- r2 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ // Make r5 the space we have left. The stack might already have overflowed
+ // here, which will cause r5 to become negative.
+ __ sub(r5, sp, r5);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
+ __ b(le, stack_overflow); // Signed comparison.
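+ // In effect this tests (sp - real_limit) <= r2 * kPointerSize, i.e.
+ // whether materializing the expected number of arguments would run past
+ // the real stack limit.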
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
+ __ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1645,10 +1418,11 @@
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+
+ __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
+ __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
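+ // PointerOffsetFromSmiKey scales the smi-tagged count straight to a byte
+ // offset: a 32-bit smi is the value shifted left by one, so the single
+ // further LSL #1 it expands to yields count * kPointerSize.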
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
@@ -1658,13 +1432,14 @@
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
- // -- r3 : code entry to call
- // -- r5 : call kind information
// -----------------------------------
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
Label invoke, dont_adapt_arguments;
Label enough, too_few;
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1679,7 +1454,7 @@
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kPointerSize));
__ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
@@ -1710,7 +1485,7 @@
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
@@ -1732,7 +1507,9 @@
// r3: code entry to call
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ sub(r2, r2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
@@ -1758,6 +1535,14 @@
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ Jump(r3);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bkpt(0);
+ }
}
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f772db9..25270d1 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,1028 +1,245 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate, CodeStubDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ }
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate, CodeStubDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kInternalArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ }
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
#define __ ACCESS_MASM(masm)
+
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
Label* lhs_not_nan,
Label* slow,
bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, scratch2);
- __ b(ne, not_a_heap_number);
-}
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(r0, &check_heap_number);
- __ Ret();
-
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
- __ Ret();
-
- __ bind(&call_builtin);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the global context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- Label* fail) {
- // Registers on entry:
- //
- // r3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- fail,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ DCHECK(param_count == 0 ||
+ r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor.GetEnvironmentParameterRegister(i));
}
+ __ CallExternalReference(miss, param_count);
}
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
+ __ Ret();
}
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
+ DCHECK(is_truncating());
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
- __ bind(&check_fast_elements);
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch_low =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch_high =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
+ __ Push(scratch_high, scratch_low, scratch);
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ vldr(double_scratch, MemOperand(input_reg, double_offset));
+ __ vmov(scratch_low, scratch_high, double_scratch);
+
+ // Do fast-path convert from double to int.
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ __ vmov(result_reg, double_scratch.low());
+
+ // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ __ sub(scratch, result_reg, Operand(1));
+ __ cmp(scratch, Operand(0x7ffffffe));
+ __ b(lt, &done);
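+ // The sub/cmp pair is a compact saturation test: only the saturated
+ // results 0x7fffffff and 0x80000000 map to scratch values of 0x7ffffffe
+ // and 0x7fffffff; every in-range result makes scratch signed-less-than
+ // 0x7ffffffe, so the lt branch reaches done exactly when the conversion
+ // did not saturate.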
+ } else {
+ // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
+ // know the exponent is > 31 and can skip the vcvt_s32_f64, which would
+ // saturate.
+ if (double_offset == 0) {
+ __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
} else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ __ ldr(scratch_low, MemOperand(input_reg, double_offset));
+ __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
}
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ CompareRoot(r3, expected_map_index);
- __ Assert(eq, message);
- __ pop(r3);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ __ Ubfx(scratch, scratch_high,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // Load scratch with exponent - 1. This is faster than loading with the
+ // exponent because Bias + 1 = 1024, which is an *ARM* immediate value.
+ STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
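+ // (An ARM data-processing immediate is an 8-bit value rotated right by an
+ // even amount; 1024 = 0x400 encodes directly, while 1023 = 0x3ff does not.)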
+ // If the exponent is greater than or equal to 84, the 32 least significant
+ // bits are zeros (the implicit bit plus 52 mantissa bits leave 32 uncoded
+ // zero bits below them), so the truncated result is 0.
+ // Compare the exponent with 84 (compare exponent - 1 with 83).
+ __ cmp(scratch, Operand(83));
+ __ b(ge, &out_of_range);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ // If we reach this code, 31 <= exponent <= 83.
+ // So we don't have to handle cases where 0 <= exponent <= 20, for which
+ // we would need to shift the high part of the mantissa to the right.
+ // Scratch contains exponent - 1.
+ // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+ __ rsb(scratch, scratch, Operand(51), SetCC);
+ __ b(ls, &only_low);
+ // 21 <= exponent <= 51, shift scratch_low and scratch_high
+ // to generate the result.
+ __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+ // Scratch contains: 52 - exponent.
+ // We need: exponent - 20.
+ // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+ __ rsb(scratch, scratch, Operand(32));
+ __ Ubfx(result_reg, scratch_high,
+ 0, HeapNumber::kMantissaBitsInTopWord);
+ // Set the implicit 1 before the mantissa part in scratch_high.
+ __ orr(result_reg, result_reg,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
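+ // Combined: result = (implicit_1:mantissa_high << (exponent - 20)) |
+ // (mantissa_low >> (52 - exponent)), i.e. the low 32 bits of the truncated
+ // integer before the sign is applied.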
+ __ b(&negate);
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ cmp(r0, Operand(size >> kPointerSizeLog2));
- __ b(ne, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- Register exponent = result1_;
- Register mantissa = result2_;
-
- Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, ¬_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0, RelocInfo::NONE));
- __ Ret();
-
- __ bind(¬_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
-
-
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
- __ vmov(d7.high(), scratch1);
- __ vcvt_f64_s32(d7, d7.high());
- __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
- __ vmov(d6.high(), scratch1);
- __ vcvt_f64_s32(d6, d6.high());
- if (destination == kCoreRegisters) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(scratch1, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
- __ push(lr);
- __ Call(stub1.GetCode());
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(scratch1, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode());
- __ pop(lr);
- }
-}
-
-
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) &&
- destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber to double register.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(dst, scratch1, HeapNumber::kValueOffset);
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
- __ jmp(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi to double using VFP instructions.
- __ vmov(dst.high(), scratch1);
- __ vcvt_f64_s32(dst, dst.high());
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ vmov(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, Operand(object));
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(lr);
- __ Call(stub.GetCode());
- __ pop(lr);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ cmp(scratch1, heap_number_map);
- __ b(ne, not_number);
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- ¬_in_int32_range);
- __ jmp(&done);
-
- __ bind(¬_in_int32_range);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- SwVfpRegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
-
- Label done;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(single_scratch, int_scratch);
- __ vcvt_f64_s32(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst2 | dst1 |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ cmp(int_scratch, Operand::Zero());
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
- __ b(eq, &done);
-
- // Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
- // Get the absolute value of the object (as an unsigned integer).
- __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ CountLeadingZeros(dst1, int_scratch, scratch2);
- __ rsb(dst1, dst1, Operand(31));
-
- // Set the exponent.
- __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst2, scratch2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
-
- __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
- // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(mi, &fewer_than_20_useful_bits);
- // Set the higher 20 bits of the mantissa.
- __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
- __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst1, Operand(int_scratch, LSL, scratch2));
- __ b(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst2, dst2, scratch2);
- // Set dst1 to 0.
- __ mov(dst1, Operand::Zero());
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ __ bind(&out_of_range);
+ __ mov(result_reg, Operand::Zero());
__ b(&done);
- __ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+ __ bind(&only_low);
+ // 52 <= exponent <= 83, shift only scratch_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ rsb(scratch, scratch, Operand::Zero());
+ __ mov(result_reg, Operand(scratch_low, LSL, scratch));
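+ // After the rsb, scratch = exponent - 52 (in [0, 31]); the high mantissa
+ // word would only contribute bits at position 32 and above, so the whole
+ // 32-bit result is the low word shifted left by that amount.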
- // Load the number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_dst,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
-
- if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
- __ cmp(scratch1, Operand::Zero());
- __ b(eq, &done);
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
-
- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
+ __ bind(&negate);
+ // If the input was positive, scratch_high ASR 31 equals 0 and
+ // scratch_high LSR 31 equals 0.
+ // New result = (result eor 0) + 0 = result.
+ // If the input was negative, we have to negate the result.
+ // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
+ __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
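+ // Branch-free negation, worked through: for a negative input the operands
+ // are 0xffffffff and 1, so result = (result ^ 0xffffffff) + 1, the two's
+ // complement; for a positive input they are 0 and 0, a no-op.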
__ bind(&done);
+
+ __ Pop(scratch_high, scratch_low, scratch);
+ __ Ret();
}
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- SwVfpRegister single_scratch = double_scratch.low();
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
- // Get the result in the destination register.
- __ vmov(dst, single_scratch);
-
- } else {
- // Load the double value in the destination registers.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
- __ orr(dst, scratch2, Operand(dst));
- __ cmp(dst, Operand::Zero());
- __ b(eq, &done);
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ mov(dst, Operand(dst, LSR, scratch3));
- // Set the implicit first bit.
- __ rsb(scratch3, scratch3, Operand(32));
- __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
- // Set the sign.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ubfx(scratch,
- src1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ b(mi, not_int32);
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
- __ cmp(tmp, Operand(30));
- __ b(gt, not_int32);
- // - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
- __ b(ne, not_int32);
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ubfx(dst,
- src2,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ orr(dst,
- dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
-
- // Create the mask and test the lower bits (of the higher bits).
- __ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
- __ b(ne, not_int32);
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
- __ vstr(d0,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1, FieldMemOperand(heap_number_result,
- HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
-
-
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
- WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
+ WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
+ stub1.GetCode();
+ stub2.GetCode();
}
@@ -1033,29 +250,29 @@
// We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
+ __ cmp(the_int(), Operand(0x80000000u));
__ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch_, Operand(non_smi_exponent));
+ __ mov(scratch(), Operand(non_smi_exponent));
// Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+ __ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
// Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
+ __ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
// We should be masking the implicit first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
+ __ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
+ __ str(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+ __ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
+ __ str(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
__ Ret();
__ bind(&max_negative_int);
@@ -1065,9 +282,9 @@
// significant 1 bit is not stored.
non_smi_exponent += 1 << HeapNumber::kExponentShift;
__ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0, RelocInfo::NONE));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+ __ mov(ip, Operand::Zero());
+ __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
__ Ret();
}
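Because ARM smis carry 31-bit payloads, every int32 that reaches this stub has a magnitude of at least 2^30, so a single biased exponent (30, or 31 for the lone value -2^31) covers all cases. A host-side sketch of the words the stub stores (illustrative, under that 31-bit-smi assumption):

    #include <cassert>
    #include <cstdint>

    // Illustrative: build the IEEE-754 bit pattern the stub writes.
    static uint64_t Int32ToDoubleBits(int32_t value) {
      uint32_t sign = value < 0 ? 0x80000000u : 0u;
      uint32_t magnitude = value < 0 ? 0u - static_cast<uint32_t>(value)
                                     : static_cast<uint32_t>(value);
      assert(magnitude >= (1u << 30));  // smaller values are smis
      if (magnitude == 0x80000000u) {
        // -2^31 = -1.0 * 2^31; the implicit leading 1 is not stored.
        return static_cast<uint64_t>(sign | ((1023u + 31u) << 20)) << 32;
      }
      // 2^30 <= |value| < 2^31, so the exponent is always 30. Drop the
      // implicit leading bit (bit 30) and left-align the rest in the mantissa.
      uint64_t mantissa =
          static_cast<uint64_t>(magnitude & ~(1u << 30)) << (52 - 30);
      return (static_cast<uint64_t>(sign | ((1023u + 30u) << 20)) << 32) |
             mantissa;
    }

memcpy-ing the result into a double and comparing it with static_cast<double>(value) is a quick way to sanity-check the layout.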
@@ -1077,48 +294,43 @@
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, ¬_identical);
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1133,47 +345,45 @@
}
__ Ret();
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(¬_identical);
}
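Two ECMAScript quirks shape this helper: identical references are equal unless the value is NaN, and while (undefined == undefined) is true, (undefined <= undefined) is false per ES5 11.8.5, so for le/ge the stub loads the failing value instead of reporting equality. The NaN half is observable with plain C++ doubles (illustrative, not from the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
      double nan = std::nan("");
      // The operands are identical, yet every comparison fails:
      std::printf("nan == nan: %d\n", nan == nan);  // 0
      std::printf("nan <= nan: %d\n", nan <= nan);  // 0
      std::printf("nan >= nan: %d\n", nan >= nan);  // 0
      // Hence the stub returns GREATER for le (making <= fail) and LESS
      // for ge (making >= fail) whenever a NaN is involved.
      return 0;
    }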
@@ -1186,7 +396,7 @@
Label* lhs_not_nan,
Label* slow,
bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
Label rhs_is_smi;
@@ -1209,23 +419,10 @@
}
// Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode());
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
+ // Convert lhs to a double in d7.
+ __ SmiToDouble(d7, lhs);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a smi.
@@ -1249,137 +446,19 @@
}
// Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6.
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode());
- __ pop(lr);
- }
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ // Convert rhs to a double in d6.
+ __ SmiToDouble(d6, rhs);
// Fall through to both_loaded_as_doubles.
}
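The new SmiToDouble call relies on the smi encoding: a smi is its 31-bit payload shifted left once, with tag bit 0 clear (heap pointers carry tag 1). A minimal sketch of the tag arithmetic, assuming kSmiTagSize == 1 as on ARM:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;

    // Tag through unsigned arithmetic to avoid signed-shift UB on negatives.
    static int32_t SmiTag(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
    }

    // Untag with an arithmetic shift, which is what "ASR #1" does above.
    static int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    // SmiToDouble is then just untag-and-convert (vcvt in the generated code).
    static double SmiToDouble(int32_t smi) {
      return static_cast<double>(SmiUntag(smi));
    }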
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cond == lt || cond == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
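The deleted check exploits the IEEE-754 encoding of NaN: all eleven exponent bits set, so the signed bit-field extract sign-extends to -1, plus at least one non-zero mantissa bit. The same test on the host (illustrative):

    #include <cstdint>
    #include <cstring>

    // Bit-level NaN test matching the deleted EmitNanCheck.
    static bool IsNaNBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7FFu;
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      // An all-ones exponent alone would be an infinity; NaN also needs
      // at least one mantissa bit.
      return exponent == 0x7FFu && mantissa != 0;
    }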
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
- Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cond == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the lhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the rhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, r5);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(pc); // Return.
- }
-}
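The equality branch above handles the one exception to "equal doubles iff equal bit patterns": +0 and -0 differ only in the sign bit yet must compare equal. Illustrative:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint64_t Bits(double d) {
      uint64_t b;
      std::memcpy(&b, &d, sizeof b);
      return b;
    }

    int main() {
      assert(Bits(0.0) != Bits(-0.0));  // the sign bit differs
      assert(0.0 == -0.0);              // yet the values compare equal
      return 0;
    }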
-
-
// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// If either operand is a JS object or an oddball value, then they are
@@ -1409,13 +488,12 @@
__ cmp(r3, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(ne, &return_not_equal);
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orr(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ b(eq, &return_not_equal);
}
@@ -1426,7 +504,7 @@
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
__ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
@@ -1437,43 +515,34 @@
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(both_loaded_as_doubles);
}
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- // Ensure that no non-strings have the symbol bit set.
Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
+ __ tst(r2, Operand(kIsNotInternalizedMask));
+ __ b(ne, possible_strings);
__ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
+ __ tst(r3, Operand(kIsNotInternalizedMask));
+ __ b(ne, possible_strings);
- // Both are symbols. We already checked they weren't the same pointer
+ // Both are internalized. We already checked they weren't the same pointer
// so they are not equal.
__ mov(r0, Operand(NOT_EQUAL));
__ Ret();
@@ -1496,153 +565,59 @@
}
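The renamed fast path is sound because internalization canonicalizes: each distinct string content has exactly one internalized object, so distinct pointers imply distinct contents. A toy interning table conveying the idea (my sketch, nothing like V8's actual string table):

    #include <string>
    #include <unordered_set>

    // One canonical object per distinct content.
    static const std::string* Internalize(const std::string& s) {
      static std::unordered_set<std::string> table;
      return &*table.insert(s).first;  // an existing entry is reused
    }

    // Equality degrades to pointer identity; the stub already ruled out
    // identical pointers, hence the unconditional NOT_EQUAL above.
    static bool InternalizedEquals(const std::string* a, const std::string* b) {
      return a == b;
    }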
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+ Register scratch,
+ CompareICState::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareICState::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareICState::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
}
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
}
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
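The deleted lookup hashes into a power-of-two cache of {number, string} pairs: smis hash to their own value, doubles to the XOR of their two 32-bit words, in both cases masked by (number of entries - 1), which is what the shifted FixedArray length computes above. The hashing alone, host-side (illustrative):

    #include <cstdint>
    #include <cstring>

    // mask == number of cache entries - 1 (the cache size is a power of two).
    static uint32_t DoubleStringCacheHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return (lo ^ hi) & mask;
    }

    static uint32_t SmiStringCacheHash(int32_t value, uint32_t mask) {
      return static_cast<uint32_t>(value) & mask;  // smis hash to their value
    }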
-
-
-// On entry lhs_ and rhs_ are the values to be compared.
+// On entry r1 and r2 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, ¬_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(¬_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, ¬_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(¬_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ DCHECK_EQ(0, Smi::FromInt(0));
+ __ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, ¬_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1653,115 +628,97 @@
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
- Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP3)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
+ __ bind(&lhs_not_nan);
+ Label no_nan;
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ VFPCompareAndSetFlags(d7, d6);
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc_ == lt || cc_ == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
} else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
__ bind(¬_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
- Label check_for_symbols;
+ Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
- &check_for_symbols,
+ &check_for_internalized_strings,
&flat_string_check);
- __ bind(&check_for_symbols);
+ __ bind(&check_for_internalized_strings);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // symbols.
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
+ // internalized strings.
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
- r2,
- r3,
- r4);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
+ r3);
+ if (cc == eq) {
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
- r2,
- r3,
- r4,
- r5);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
+ r5);
}
// Never falls through to here.
__ bind(&slow);
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ DCHECK(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1771,118 +728,9 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
-}
-
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // This stub uses VFP3 instructions.
- CpuFeatures::Scope scope(VFP3);
-
- Label patch;
- const Register map = r9.is(tos_) ? r7 : r9;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ tst(tos_, Operand(kSmiTagMask));
- // tos_ contains the correct return value already
- __ Ret(eq);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ Ret(ne);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, ¬_heap_number);
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
- __ Ret();
- __ bind(¬_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(ip, value);
- __ cmp(tos_, ip);
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- }
- __ Ret(eq);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- if (!tos_.is(r3)) {
- __ mov(r3, Operand(tos_));
- }
- __ mov(r2, Operand(Smi::FromInt(tos_.code())));
- __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(r3, r2, r1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
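The ncr value pushed for the COMPARE builtin follows the same rule as the inline NaN paths above: pick the result that makes the requested condition fail. Schematically (illustrative enums standing in for the real definitions):

    enum Condition { lt, le, gt, ge };
    enum { LESS = -1, EQUAL = 0, GREATER = 1 };

    // GREATER fails lt/le and LESS fails gt/ge, so any comparison
    // involving NaN evaluates to false.
    static int NanCompareResult(Condition cc) {
      return (cc == lt || cc == le) ? GREATER : LESS;
    }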
@@ -1891,1575 +739,44 @@
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
- }
+
+ const Register scratch = r1;
+
+ if (save_doubles()) {
+ __ SaveFPRegs(sp, scratch);
}
const int argument_count = 1;
const int fp_argument_count = 0;
- const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ if (save_doubles()) {
+ __ RestoreFPRegs(sp, scratch);
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ mov(r3, Operand(r0)); // the operand
- __ mov(r2, Operand(Smi::FromInt(op_)));
- __ mov(r1, Operand(Smi::FromInt(mode_)));
- __ mov(r0, Operand(Smi::FromInt(operand_type_)));
- __ Push(r3, r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, slow);
-
- // Return '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
-}
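Both deleted smi fast paths rest on small bit identities. Negation must bail out for 0 (whose negation is -0, a heap number) and for the minimum smi (whose negation overflows); clearing bit 31 and testing for zero catches exactly those two tagged values. Bitwise NOT, in contrast, always stays a smi: flipping every bit of a tagged value also flips the tag, and one bic restores it. Illustrative:

    #include <cstdint>

    constexpr uint32_t kSmiTagMask = 1;

    // False exactly for tagged 0 and tagged 0x80000000 (the minimum smi),
    // i.e. the two inputs the deleted stub sends to the slow path.
    static bool NegationStaysSmi(uint32_t tagged) {
      return (tagged & ~0x80000000u) != 0;
    }

    // ~(v << 1) has the tag bit set; clearing it yields (~v) << 1, a valid smi.
    static int32_t SmiBitNot(int32_t tagged) {
      return static_cast<int32_t>(~static_cast<uint32_t>(tagged) & ~kSmiTagMask);
    }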
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateHeapNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (mode_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
-
- __ bind(&heapnumber_allocated);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm, Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // Convert the heap number in r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
-
- // Tag the result as a smi and we're done.
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping r0, which we need if it fails.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- }
-
- // Convert the heap number in r0 to an untagged integer in r1.
- // This can't go slow-case because it's the same number we already
- // converted once before.
- __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
- __ mvn(r1, Operand(r1));
-
- __ bind(&heapnumber_allocated);
- __ mov(r0, r2); // Move newly allocated heap number to r0.
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op_) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, ¬_smi_result);
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand(0));
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV:
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result);
- // Check for positive and no remainder (scratch1 contains right - 1).
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, ¬_smi_result);
-
- // Perform division by shifting.
- __ CountLeadingZeros(scratch1, scratch1, scratch2);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
- break;
- case Token::MOD:
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, ¬_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result);
-
- // Perform modulus by masking.
- __ and_(right, left, Operand(scratch1));
- __ Ret();
- break;
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, ¬_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, ¬_smi_result);
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(¬_smi_result);
-}
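Several of these fast paths map directly onto host-side checks: the optimistic add/subtract keys off the overflow (V) flag and reverts on failure, the multiply accepts the smull product only when the high word equals the sign extension of the low word, and division handles only positive powers of two, where it degenerates into a logical shift. Illustrative equivalents of the first two (the overflow builtin is GCC/Clang-specific):

    #include <cstdint>

    // Optimistic add: detect the signed overflow the V flag would signal.
    static bool SmiAddOverflows(int32_t left, int32_t right, int32_t* result) {
      return __builtin_add_overflow(left, right, result);
    }

    // smull check: a 64-bit product fits in 32 bits iff its high word equals
    // the sign extension (ASR #31) of its low word.
    static bool MulFitsInt32(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      return hi == (lo >> 31);
    }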
-
-
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- Register scratch3 = r4;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
- // depending on whether VFP3 is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP3);
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
- result,
- scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int, so in that case we go to
- // the slow path.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, not_numbers);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ SmiTag(r0, r2);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful a return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub::GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, ¬_smis);
-
- // If the smi-smi operation results in a smi, a return is generated.
- GenerateSmiSmiOperation(masm);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
- }
- __ bind(&not_smis);
-}
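
The combined smi check in GenerateSmiCode relies on kSmiTag == 0: OR-ing both
operands leaves the tag bit clear iff both are smis, so one test covers two
values. A hedged C++ restatement; the constants are illustrative, not the V8
headers:

#include <cstdint>

const uint32_t kSmiTagMask = 1;  // low bit: 0 for smi, 1 for heap pointer

bool BothSmis(uint32_t left, uint32_t right) {
  // Mirrors "orr(scratch1, left, right); JumpIfNotSmi(scratch1, &not_smis)".
  return ((left | right) & kSmiTagMask) == 0;
}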
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
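
GenerateBothStringStub's string tests lean on the layout of V8's instance-type
enumeration: every string type sorts below FIRST_NONSTRING_TYPE, so the 'ge'
branch after CompareObjectType rejects any non-string. A sketch under that
assumption; the constant's value here is illustrative only:

const unsigned kFirstNonstringType = 0x80;  // illustrative, not the real value

bool IsStringInstanceType(unsigned instance_type) {
  // Mirrors "CompareObjectType(..., FIRST_NONSTRING_TYPE); b(ge, ...)".
  return instance_type < kFirstNonstringType;
}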
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
- SwVfpRegister single_scratch = s3;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the causes leading to type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load both operands and check that they are 32-bit integers.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
- ? FloatingPointHelper::kVFPRegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
-
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- d5,
- scratch1,
- scratch2);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If the ne condition is set, the result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- }
-
- // Check if the result fits in a smi.
- __ vmov(scratch1, single_scratch);
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- // If not, try to return a heap number.
- __ b(mi, &return_heap_number);
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &not_zero);
- __ vmov(scratch2, d5.high());
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &return_heap_number);
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(r0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
- : BinaryOpIC::INT32)) {
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
-
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = r5;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // Such a result cannot be represented as a signed 32-bit integer, so
- // try to return a heap number if we can.
- // The non-VFP3 code does not support this special case, so jump to the
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- } else {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime);
- }
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ add(scratch1, r2, Operand(0x40000000), SetCC);
- // If not, try to return a heap number. (We know the result is an int32.)
- __ b(mi, &return_heap_number);
- // Tag the result and return.
- __ SmiTag(r0, r2);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- __ mov(r0, r5);
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
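
The shift lowering in GenerateInt32Stub follows the ECMA-262 rule quoted
above: only the five least significant bits of the shift count are used, and
>>> reinterprets the result as unsigned. A plain C++ restatement of those
semantics (a sketch, not V8 code):

#include <cstdint>

int32_t SignedShiftRight(int32_t x, uint32_t count) {
  return x >> (count & 0x1f);  // SAR: sign-propagating
}

uint32_t UnsignedShiftRight(int32_t x, uint32_t count) {
  // SHR: with count == 0 a negative x yields a value above INT32_MAX, the
  // one case the stub must route to a heap number (or the runtime) above.
  return static_cast<uint32_t>(x) >> (count & 0x1f);
}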
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
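
GenerateOddballStub's undefined handling follows from ES semantics:
ToNumber(undefined) is NaN, but ToInt32(NaN) is 0, so bitwise operators can
substitute Smi 0 directly while the other operators load the canonical NaN.
A one-line sketch, assuming those semantics:

#include <limits>

double UndefinedToNumber(bool is_bit_op) {
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}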
-
-
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // The code below will clobber result if allocation fails. To keep both
- // arguments intact for the runtime call, result cannot be one of them.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode_ == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
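
A hedged restatement of the overwrite policy implemented above: when the stub
may overwrite an operand and that operand is already a heap object (not a
smi), its heap number is reused as the result slot; otherwise a fresh heap
number is allocated, jumping to gc_required on failure. Sketch only; the
types and callback are stand-ins, not V8 API:

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct Value { bool is_smi; };  // stand-in for a tagged V8 value

Value* ResultSlot(OverwriteMode mode, Value* left, Value* right,
                  Value* (*allocate_heap_number)()) {
  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
    Value* operand = (mode == OVERWRITE_LEFT) ? left : right;
    if (!operand->is_smi) return operand;  // the skip_allocation path
  }
  return allocate_heap_number();  // may fail and jump to gc_required
}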
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in d2, double result goes
- // into d2.
- // Tagged case: tagged input on top of stack and in r0,
- // tagged result (heap number) goes into r0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = r9;
- const Register scratch1 = r7;
- const Register cache_entry = r0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
- __ b(eq, &invalid_cache);
-
-#ifdef DEBUG
- // Check that the layout of cache elements matches expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(VFP3))
-
- __ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- ExternalReference runtime_function =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime_function, 1, 1);
- } else {
- if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
- CpuFeatures::Scope scope(VFP3);
-
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // Register r0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ push(cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ pop(cache_entry);
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
- __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
- __ Ret();
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
- __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
- __ bind(&no_update);
-
- // We return the value in d2 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
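
The cache lookup above hashes the double's two 32-bit halves and then indexes
a 12-byte-per-element array. A C++ restatement (the ARM code uses arithmetic
shifts, but after masking the low bits come out the same; cache_size must be
a power of two, as asserted in the stub):

#include <cstdint>

uint32_t CacheHash(uint32_t low, uint32_t high, uint32_t cache_size) {
  uint32_t h = low ^ high;
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (cache_size - 1);
}

uintptr_t EntryAddress(uintptr_t base, uint32_t h) {
  // Each element is two uint32_t inputs plus an output pointer: 12 bytes.
  // base + h * 12, computed as (h + 2h) << 2, matching the two adds above.
  return base + ((h + (h << 1)) << 2);
}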
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- Isolate* isolate = masm->isolate();
-
- __ push(lr);
- __ PrepareCallCFunction(0, 1, scratch);
- if (masm->use_eabi_hardfloat()) {
- __ vmov(d0, d2);
- } else {
- __ vmov(r0, r1, d2);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(lr);
-}
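
GenerateCallCFunction passes the double per the EABI variant in use: the
hard-float ABI takes it in d0, while the soft-float ABI splits the 64-bit
pattern across r0/r1 (low word first on little-endian ARM), which is what
"vmov(r0, r1, d2)" does. A host-side sketch of that split:

#include <cstdint>
#include <cstring>

void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);      // type-pun safely
  *lo = static_cast<uint32_t>(bits);        // would go in r0
  *hi = static_cast<uint32_t>(bits >> 32);  // would go in r1
}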
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp3_scope(VFP3);
const Register base = r1;
- const Register exponent = r2;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(r2));
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
- const SwVfpRegister single_scratch = s0;
+ const DwVfpRegister double_base = d0;
+ const DwVfpRegister double_exponent = d1;
+ const DwVfpRegister double_result = d2;
+ const DwVfpRegister double_scratch = d3;
+ const SwVfpRegister single_scratch = s6;
const Register scratch = r9;
- const Register scratch2 = r7;
+ const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
@@ -3489,7 +806,7 @@
__ b(ne, &call_runtime);
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -3497,7 +814,7 @@
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ vcvt_u32_f64(single_scratch, double_exponent);
@@ -3507,20 +824,20 @@
__ VFPCompareAndSetFlags(double_scratch, double_exponent);
__ b(eq, &int_exponent_convert);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
Label not_plus_half;
// Test for 0.5.
- __ vmov(double_scratch, 0.5);
+ __ vmov(double_scratch, 0.5, scratch);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
 __ b(ne, &not_plus_half);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vneg(double_result, double_scratch, eq);
__ b(eq, &done);
@@ -3531,20 +848,20 @@
__ jmp(&done);
 __ bind(&not_plus_half);
- __ vmov(double_scratch, -0.5);
+ __ vmov(double_scratch, -0.5, scratch);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &call_runtime);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vmov(double_result, kDoubleRegZero, eq);
__ b(eq, &done);
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1);
+ __ vmov(double_result, 1.0, scratch);
__ vsqrt(double_scratch, double_scratch);
__ vdiv(double_result, double_result, double_scratch);
__ jmp(&done);
@@ -3554,13 +871,13 @@
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
@@ -3572,18 +889,18 @@
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
+ if (exponent_type() == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0);
+ __ vmov(double_result, 1.0, scratch2);
// Get absolute value of exponent.
- __ cmp(scratch, Operand(0));
- __ mov(scratch2, Operand(0), LeaveCC, mi);
+ __ cmp(scratch, Operand::Zero());
+ __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
__ sub(scratch, scratch2, scratch, LeaveCC, mi);
Label while_true;
@@ -3593,9 +910,9 @@
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
- __ cmp(exponent, Operand(0));
+ __ cmp(exponent, Operand::Zero());
__ b(ge, &done);
- __ vmov(double_scratch, 1.0);
+ __ vmov(double_scratch, 1.0, scratch);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
@@ -3607,11 +924,11 @@
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ Counters* counters = isolate()->counters();
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -3620,7 +937,7 @@
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- ASSERT(heapnumber.is(r0));
+ DCHECK(heapnumber.is(r0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
@@ -3628,13 +945,13 @@
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
@@ -3648,77 +965,69 @@
}
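
The int_exponent path above is binary exponentiation: shift the exponent
right one bit per iteration, multiplying the result in when the shifted-out
bit is set and squaring the base each round; a negative exponent takes a
reciprocal at the end (where the stub also bails out for subnormal results).
A standalone sketch:

#include <cassert>

double PowInt(double base, int exponent) {
  unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;
  double scratch = base;  // squared each round, like double_scratch
  while (e != 0) {
    if (e & 1u) result *= scratch;  // the 'cs' (carry set) vmul
    scratch *= scratch;             // the 'ne' vmul
    e >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  assert(PowInt(2.0, 10) == 1024.0);
  assert(PowInt(2.0, -2) == 0.25);
  return 0;
}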
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Generate if not already in cache.
+ SaveFPRegsMode mode = kSaveFPRegs;
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
+ isolate->set_fp_stubs_generated(true);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
}
-void CEntryStub::GenerateAheadOfTime() {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
- code->set_is_pregenerated(true);
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function.
+ // r0: number of arguments including receiver
+ // r1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
+ __ mov(r5, Operand(r1));
+
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r1, r1, Operand(kPointerSize));
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles());
+
+ // Store a copy of argc in callee-saved registers for later.
+ __ mov(r4, Operand(r0));
+
+ // r0, r4: number of arguments including receiver (C callee-saved)
+ // r1: pointer to the first argument (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
- Isolate* isolate = masm->isolate();
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
- }
+ // Result returned in r0 or r0+r1 by default.
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
-
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
-
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -3728,7 +1037,9 @@
}
#endif
- __ mov(r2, Operand(ExternalReference::isolate_address()));
+ // Call C built-in.
+ // r0 = argc, r1 = argv
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -3737,153 +1048,81 @@
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
- masm->add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
-
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (they contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
+ {
+ // Prevent literal pool emission before return address.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ add(lr, pc, Operand(4));
+ __ str(lr, MemOperand(sp, 0));
+ __ Call(r5);
}
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
+ __ VFPEnsureFPSCRState(r2);
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &okay);
+ __ stop("The hole escaped");
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(r0, Heap::kExceptionRootIndex);
+ __ b(eq, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ mov(r2, Operand(pending_exception_address));
+ __ ldr(r2, MemOperand(r2));
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ // Cannot use check here as it attempts to generate call into runtime.
+ __ b(eq, &okay);
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
// Exit C frame and return.
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- // Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
+ // Callee-saved register r4 still holds argc.
+ __ LeaveExitFrame(save_doubles(), r4, true);
__ mov(pc, lr);
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
+ // Handling of exception.
+ __ bind(&exception_returned);
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ b(eq, throw_out_of_memory_exception);
+ // Retrieve the pending exception.
+ __ mov(r2, Operand(pending_exception_address));
+ __ ldr(r0, MemOperand(r2));
- // Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ ldr(r0, MemOperand(ip));
- __ str(r3, MemOperand(ip));
+ // Clear the pending exception.
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ str(r3, MemOperand(r2));
// Special handling of termination exceptions which are uncatchable
 // by JavaScript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
- __ b(eq, throw_termination_exception);
+ Label throw_termination_exception;
+ __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
+ __ b(eq, &throw_termination_exception);
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Compute the argv pointer in a callee-saved register.
- __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r6, r6, Operand(kPointerSize));
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // Set up argc and the builtin function in callee-saved registers.
- __ mov(r4, Operand(r0));
- __ mov(r5, Operand(r1));
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
+ __ Throw(r0);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r0);
-
- __ bind(&throw_normal_exception);
- __ Throw(r0);
}
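
A note on the return-address computation in the call sequence above: reading
pc on ARM yields the address of the current instruction plus 8, so
"add(lr, pc, Operand(4))" produces addr + 12, exactly past the three 4-byte
instructions (add, str, call) emitted inside the constant-pool-blocked scope.
In arithmetic form:

#include <cassert>
#include <cstdint>

uint32_t ReturnAddressAfterCallSequence(uint32_t addr_of_add) {
  const uint32_t kPcReadAhead = 8;  // ARM pipeline: pc reads as addr + 8
  const uint32_t kInstrSize = 4;
  uint32_t lr = addr_of_add + kPcReadAhead + 4;
  assert(lr == addr_of_add + 3 * kInstrSize);  // past add, str, and the call
  return lr;
}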
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
// r0: code entry
// r1: function
// r2: receiver
@@ -3892,18 +1131,18 @@
Label invoke, handler_entry, exit;
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Save callee-saved vfp registers.
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- // Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, 0.0);
- }
+ // Save callee-saved vfp registers.
+ __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, 0.0);
+ __ VFPEnsureFPSCRState(r4);
// Get address of argv, see stm above.
// r0: code entry
@@ -3913,9 +1152,7 @@
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP3)) {
- offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
- }
+ offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
__ ldr(r4, MemOperand(sp, offset_to_argv));
// Push a frame with special values setup to mark it as an entry frame.
@@ -3924,22 +1161,26 @@
// r2: receiver
// r3: argc
// r4: argv
- Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
+ if (FLAG_enable_ool_constant_pool) {
+ __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
+ }
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
+ __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
+ (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
+ ip.bit());
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(r6, Operand::Zero());
@@ -3956,22 +1197,29 @@
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+
+ // Block literal pool emission whilst taking the position of the handler
+ // entry. This avoids making the assumption that literal pools are always
+ // emitted after an instruction is emitted, rather than before.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ }
__ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
+ // Must preserve r0-r4, r5-r6 are available.
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
@@ -3979,9 +1227,9 @@
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+ __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ str(r5, MemOperand(ip));
// Invoke the function by calling through JS entry trampoline builtin.
@@ -3994,21 +1242,19 @@
// r2: receiver
// r3: argc
// r4: argv
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
+ isolate());
__ mov(ip, Operand(construct_entry));
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ mov(ip, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc.
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Branch and link to JSEntryTrampoline.
+ __ Call(ip);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -4027,7 +1273,7 @@
// Restore the top frame descriptors from the stack.
__ pop(r3);
__ mov(ip,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ str(r3, MemOperand(ip));
// Reset the stack to the callee saved registers.
@@ -4040,11 +1286,8 @@
}
#endif
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Restore callee-saved vfp registers.
- __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- }
+ // Restore callee-saved vfp registers.
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
@@ -4056,25 +1299,19 @@
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
+// In this case the offsets to the inline sites to patch are passed in r5 and r6.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+ DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = r0; // Object (lhs).
Register map = r3; // Map of the object.
const Register function = r1; // Function (rhs).
const Register prototype = r4; // Prototype of the function.
- const Register inline_site = r9;
const Register scratch = r2;
- const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
-
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
@@ -4088,7 +1325,7 @@
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ b(ne, &miss);
@@ -4113,17 +1350,17 @@
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
- ASSERT(HasArgsInRegisters());
+ DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ // The map_load_offset was stored in r5
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register map_load_offset = r5;
+ __ sub(r9, lr, map_load_offset);
+ // Get the map location in r5 and patch it.
+ __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
+ __ ldr(map_load_offset, MemOperand(map_load_offset));
+ __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -4144,17 +1381,24 @@
__ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
__ jmp(&loop);
+ Factory* factory = isolate()->factory();
__ bind(&is_instance);
if (!HasCallSiteInlineCheck()) {
__ mov(r0, Operand(Smi::FromInt(0)));
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r0, factory->true_value());
+ }
} else {
// Patch the call site to return true.
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // The bool_load_offset was stored in r6
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register bool_load_offset = r6;
+ __ sub(r9, lr, bool_load_offset);
// Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
+ __ GetRelocatedValueLocation(r9, scratch, scratch2);
__ str(r0, MemOperand(scratch));
if (!ReturnTrueFalseObject()) {
@@ -4167,12 +1411,19 @@
if (!HasCallSiteInlineCheck()) {
__ mov(r0, Operand(Smi::FromInt(1)));
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r0, factory->false_value());
+ }
} else {
// Patch the call site to return false.
__ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // The bool_load_offset was stored in r6
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register bool_load_offset = r6;
+ __ sub(r9, lr, bool_load_offset);
// Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
+ __ GetRelocatedValueLocation(r9, scratch, scratch2);
__ str(r0, MemOperand(scratch));
if (!ReturnTrueFalseObject()) {
@@ -4190,21 +1441,33 @@
__ b(ne, &slow);
// Null is not instance of anything.
- __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
+ __ cmp(scratch, Operand(isolate()->factory()->null_value()));
__ b(ne, &object_not_null);
- __ mov(r0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ Move(r0, factory->false_value());
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ mov(r0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ Move(r0, factory->false_value());
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
- __ mov(r0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ Move(r0, factory->false_value());
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
// Slow-case. Tail call builtin.
@@ -4216,7 +1479,7 @@
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -4228,10 +1491,16 @@
}
-Register InstanceofStub::left() { return r0; }
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = LoadDescriptor::ReceiverRegister();
-
-Register InstanceofStub::right() { return r1; }
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
+ r4, &miss);
+ __ bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
@@ -4239,6 +1508,8 @@
// relative to the frame pointer.
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smi.
Label slow;
@@ -4259,7 +1530,7 @@
// Read the argument from the stack and return it.
__ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ Jump(lr);
@@ -4273,7 +1544,7 @@
// Read the argument from the adaptor frame and return it.
__ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ Jump(lr);
@@ -4285,7 +1556,7 @@
}
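
Operand::PointerOffsetFromSmiKey, used twice above, turns a smi-tagged index
directly into a byte offset: on 32-bit ARM a smi is the integer shifted left
by one tag bit, so reaching the 4-byte pointer scale takes only one more left
shift. A sketch with illustrative constants:

#include <cstdint>

int32_t PointerOffsetFromSmiKey(int32_t smi_key) {
  const int kSmiTagSize = 1;       // smi = value << 1
  const int kPointerSizeLog2 = 2;  // 4-byte pointers
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}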
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -4305,11 +1576,11 @@
__ str(r3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -4363,43 +1634,44 @@
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+ __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
// r0 = address of new object(s) (tagged)
- // r2 = argument count (tagged)
- // Get the arguments boilerplate from the current (global) context into r4.
+ // r2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ cmp(r1, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
// r0 = address of new object (tagged)
// r1 = mapped parameter count (tagged)
- // r2 = argument count (tagged)
- // r4 = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ ldr(r3, FieldMemOperand(r4, i));
- __ str(r3, FieldMemOperand(r0, i));
- }
+ // r2 = argument count (smi-tagged)
+ // r4 = address of arguments map (tagged)
+ __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ AssertNotSmi(r3);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
__ str(r3, FieldMemOperand(r0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(r2);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
@@ -4408,7 +1680,7 @@
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
@@ -4423,11 +1695,11 @@
__ mov(r3, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
__ add(r6, r4, Operand(r1, LSL, 1));
__ add(r6, r6, Operand(kParameterMapHeaderSize));
__ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
@@ -4445,31 +1717,36 @@
__ ldr(r9, MemOperand(sp, 0 * kPointerSize));
__ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ add(r3, r4, Operand(r6, LSL, 1));
__ add(r3, r3, Operand(kParameterMapHeaderSize));
// r6 = loop variable (tagged)
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (a.o., for address calculation)
- // r7 = the hole value
+ // r4 = address of parameter map (tagged), which is also the address of new
+ // object + Heap::kSloppyArgumentsObjectSize (tagged)
+ // r0 = temporary scratch (a.o., for address calculation)
+ // r5 = the hole value
+ __ jmp(&parameters_test);
+ __ bind(&parameters_loop);
__ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
+ __ mov(r0, Operand(r6, LSL, 1));
+ __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r0));
+ __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r5, MemOperand(r3, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
__ cmp(r6, Operand(Smi::FromInt(0)));
+ __ b(ne, &parameters_loop);
+ // Restore r0 = new object (tagged)
+ __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
+
__ bind(&skip_parameter_map);
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
// r3 = address of backing store (tagged)
// r5 = scratch
@@ -4500,10 +1777,37 @@
__ Ret();
// Do the runtime call to allocate the arguments object.
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+ __ NonNegativeSmiTst(key);
+ __ b(ne, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
@@ -4526,7 +1830,7 @@
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ str(r3, MemOperand(sp, 1 * kPointerSize));
@@ -4534,40 +1838,37 @@
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ SmiUntag(r1, SetCC);
__ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT |
- SIZE_IN_WORDS));
+ __ Allocate(r1, r0, r2, r3, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // Get the arguments boilerplate from the current (global) context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ // Get the arguments boilerplate from the current native context.
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ ldr(r4, MemOperand(
+ r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
- // Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
+ __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ AssertSmi(r1);
__ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize));
// If there are no actual arguments, we're done.
Label done;
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(eq, &done);
// Get the parameters pointer from the stack.
@@ -4575,13 +1876,12 @@
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+ __ SmiUntag(r1);
// Copy the fixed array slots.
Label loop;
@@ -4594,7 +1894,7 @@
// Post-increment r4 with kPointerSize on each iteration.
__ str(r3, MemOperand(r4, kPointerSize, PostIndex));
__ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(ne, &loop);
// Return and remove the on-stack parameters.
@@ -4604,7 +1904,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
@@ -4613,7 +1913,7 @@
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -4627,8 +1927,7 @@
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -4636,22 +1935,20 @@
// therefore the content of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
- Register last_match_info_elements = r6;
+ Register last_match_info_elements = no_reg; // will be r6;
// Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
@@ -4659,10 +1956,10 @@
// Check that the RegExp has been compiled (data contains a fixed array).
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
+ __ SmiTst(regexp_data);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// regexp_data: RegExp data (FixedArray)
@@ -4675,68 +1972,48 @@
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since r2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
__ b(hi, &runtime);
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
+ // Reset offset for possibly sliced string.
+ __ mov(r9, Operand::Zero());
__ ldr(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r0, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // Reset offset for possibly sliced string.
- __ mov(r9, Operand(0));
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ mov(r3, subject); // Make a copy of the original subject string.
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // r3: subject string
+ // r0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
+
+ // (1) Sequential string? If yes, go to (5).
__ and_(r1,
r0,
Operand(kIsNotStringMask |
@@ -4744,117 +2021,109 @@
kShortExternalStringMask),
SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
+ __ b(eq, &seq_string); // Go to (5).
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
- __ b(lt, &cons_string);
- __ b(eq, &external_string);
+ __ b(ge, &not_seq_nor_cons); // Go to (6).
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
-
- // String is sliced.
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
- __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // r9: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(r0, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
+ // The underlying external string is never a short external string.
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ b(ne, &external_string); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
+ // subject: sequential subject string (or look-alike, external string)
+ // r3: original subject string
+ // Load previous index and check range before r3 is overwritten. We have to
+ // use r3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(r1, &runtime);
+ __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
+ __ cmp(r3, Operand(r1));
+ __ b(ls, &runtime);
+ __ SmiUntag(r1);
+
+ STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
+ ne);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ // (E) Carry on. String handling is done.
+ // r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
-
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+ __ JumpIfSmi(r6, &runtime);
// r1: previous index
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
+ // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
+ // r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 8;
+ const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
- // Argument 8 (sp[16]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address()));
+ // Argument 9 (sp[20]): Pass current isolate address.
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+ __ str(r0, MemOperand(sp, 5 * kPointerSize));
+
+ // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
+ __ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
- // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
+ // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
+ __ str(r0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(r0, Operand::Zero());
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
__ mov(r0,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
+ Operand(ExternalReference::address_of_static_offsets_vector(
+ isolate())));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ // calculate the shift of the index (0 for one-byte and 1 for two-byte).
+ __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4865,12 +2134,12 @@
// Argument 4, r3: End of string data
// Argument 3, r2: Start of string data
// Prepare start and end index of the input.
- __ add(r9, r8, Operand(r9, LSL, r3));
+ __ add(r9, r7, Operand(r9, LSL, r3));
__ add(r2, r9, Operand(r1, LSL, r3));
- __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r8, Operand(r8, ASR, kSmiTagSize));
- __ add(r3, r9, Operand(r8, LSL, r3));
+ __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
+ __ SmiUntag(r7);
+ __ add(r3, r9, Operand(r7, LSL, r3));
// Argument 2 (r1): Previous index.
// Already there
@@ -4879,21 +2148,23 @@
__ mov(r0, subject);
// Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
+ __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm, r6);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
+
+ last_match_info_elements = r6;
// r0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
Label success;
-
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ cmp(r0, Operand(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@@ -4905,9 +2176,9 @@
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(isolate->factory()->the_hole_value()));
+ __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
@@ -4927,7 +2198,7 @@
__ bind(&failure);
// For failure and exception return null.
- __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
+ __ mov(r0, Operand(isolate()->factory()->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -4936,14 +2207,33 @@
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ // Multiplying by 2 comes for free since r1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
+ __ b(ne, &runtime);
+ // Check that the JSArray is in fast case.
+ __ ldr(last_match_info_elements,
+ FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ldr(r0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ cmp(r2, Operand::SmiUntag(r0));
+ __ b(gt, &runtime);
+
// r1: number of capture registers
// r4: subject string
// Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
+ __ SmiTag(r2, r1);
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
@@ -4953,23 +2243,24 @@
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- r2,
- r7,
+ subject,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, r2);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ mov(r2, Operand(address_of_static_offsets_vector));
// r1: number of capture registers
@@ -4986,7 +2277,7 @@
// Read the value from the static offsets vector buffer.
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
// Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+ __ SmiTag(r3);
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
__ jmp(&next_capture);
__ bind(&done);
@@ -4996,8 +2287,17 @@
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
- // External string. Short external strings have already been ruled out.
- // r0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ b(gt, ¬_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
@@ -5005,324 +2305,459 @@
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(r0, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
+ __ Assert(eq, kExternalStringExpectedButNotFound);
}
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into r9 and replace subject string with parent.
+ __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ SmiUntag(r9);
+ __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- Factory* factory = masm->isolate()->factory();
-
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(r1, &slowcase);
- __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
- __ b(hi, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
- __ add(r2, r5, Operand(objects_size));
- __ AllocateInNewSpace(
- r2, // In: Size, in words.
- r0, // Out: Start of allocation (tagged).
- r3, // Scratch register.
- r4, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // r0: Start of allocated area, object-tagged.
- // r1: Number of elements in array, as smi.
- // r5: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ ldr(r2, MemOperand(sp, kPointerSize * 1));
- __ ldr(r6, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // r0: JSArray, tagged.
- // r3: FixedArray, tagged.
- // r5: Number of elements in array, untagged.
-
- // Set map.
- __ mov(r2, Operand(factory->fixed_array_map()));
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ mov(r6, Operand(r5, LSL, kSmiTagSize));
- __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(factory->the_hole_value()));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with hole.
- // r0: JSArray, tagged.
- // r2: the hole.
- // r3: Start of elements in FixedArray.
- // r5: Number of elements to fill.
- Label loop;
- __ cmp(r5, Operand(0));
- __ bind(&loop);
- __ b(le, &done); // Jump if r5 is negative or zero.
- __ sub(r5, r5, Operand(1), SetCC);
- __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
- __ jmp(&loop);
-
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : cache cell for call target
- Label done;
+ // r2 : Feedback vector
+ // r3 : slot in feedback vector (Smi)
+ Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ // Load the cache state into r4.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r3, r1);
+ __ cmp(r4, r1);
__ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in r4.
+ __ ldr(r5, FieldMemOperand(r4, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+ }
+
+ __ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
+ __ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
+ __ bind(&megamorphic);
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ jmp(&done);
- // An uninitialized cache is patched with the function.
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
- // No need for a write barrier here - cells are rescanned.
+ // An uninitialized cache is patched with the function.
+ __ bind(&initialize);
+
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r0);
+ __ Push(r3, r2, r1, r0);
+
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
+
+ __ Pop(r3, r2, r1, r0);
+ __ SmiUntag(r0);
+ }
+ __ b(&done);
+
+ __ bind(&not_array_function);
+ }
+
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ __ Push(r4, r2, r1);
+ __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r4, r2, r1);
__ bind(&done);
}
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function;
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, cont);
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(ne, &call);
- // Patch the receiver on the stack with the global receiver object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
+ // Do not transform the receiver for native (CompilerHints already in r4).
+ __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, cont);
+}
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
- // Get the map of the function object.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(eq, &call_as_function);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function);
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, non_function);
__ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
+ __ mov(r2, Operand::Zero());
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ bind(non_function);
+ __ str(r1, MemOperand(sp, argc * kPointerSize));
+ __ mov(r0, Operand(argc)); // Set up the number of arguments.
+ __ mov(r2, Operand::Zero());
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(r1);
+ }
+ __ str(r0, MemOperand(sp, argc * kPointerSize));
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // r1 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ JumpIfSmi(r1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+ }
+
+ // Fast-case: Invoke the function now.
+ // r1: pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ ldr(r3, MemOperand(sp, argc * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(r3, &wrap);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into r2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by r3 + 1.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(r2, r5);
}
// Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register jmp_reg = r4;
+ __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
// r0: number of arguments
// r1: called object
- // r3: object type
+ // r4: object type
Label do_call;
__ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ mov(r2, Operand::Zero());
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
}
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // r1 - function
+ // r3 - slot id
+ Label miss;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, r2);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &miss);
+
+ __ mov(r0, Operand(arg_count()));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
+
+ // Verify that r4 contains an AllocationSite
+ __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
+
+ __ mov(r2, r4);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // r1 - function
+ // r3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, r2);
+
+ // The checks. First, does r1 match the recorded monomorphic target?
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ cmp(r1, r4);
+ __ b(ne, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ ldr(r3, MemOperand(sp, argc * kPointerSize));
+
+ __ JumpIfSmi(r3, &wrap);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex);
+ __ b(eq, &slow_start);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
+ __ b(eq, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(r4);
+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+ __ b(ne, &miss);
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // the slow case
+ __ bind(&slow_start);
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ JumpIfSmi(r1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+ __ jmp(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ ldr(r4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
+
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(r4, r1, r2, r3);
+
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to r1 and exit the internal frame.
+ __ mov(r1, r0);
+ }
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
// If the receiver is a smi trigger the non-string case.
__ JumpIfSmi(object_, receiver_not_string_);
@@ -5342,7 +2777,7 @@
__ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
- __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+ __ SmiUntag(index_);
StringCharLoadGenerator::Generate(masm,
object_,
@@ -5350,7 +2785,7 @@
result_,
&call_runtime_);
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ SmiTag(result_);
__ bind(&exit_);
}
@@ -5358,7 +2793,7 @@
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5374,7 +2809,7 @@
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
@@ -5396,14 +2831,14 @@
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ mov(index_, Operand(index_, LSL, kSmiTagSize));
+ __ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -5414,16 +2849,15 @@
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ // At this point code register contains smi tagged one-byte char code.
+ __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
@@ -5434,7 +2868,7 @@
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5444,25 +2878,11 @@
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
@@ -5470,359 +2890,37 @@
Register src,
Register count,
Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform the sub between the load and the dependent store to give the
- // load time to complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // Loop back unless this was the last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
__ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
+ __ Check(eq, kDestinationOfCopyNotAligned);
}
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
- if (!ascii) {
+ if (encoding == String::TWO_BYTE_ENCODING) {
__ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
}
- Label simple_loop;
+ Register limit = count; // Read until dest equals this.
+ __ add(limit, dest, Operand(count));
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // The shift register holds the number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- // Store the "shift" most significant bits of scratch in the least
- // significant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
- __ b(ge, &loop);
- }
- // There is now between zero and three bytes left to copy (the negative of
- // that number is in scratch5), and between one and three bytes already read
- // into scratch1 (eight times that number in scratch4). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
- // Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
- // scratch1, ready to write.
- __ cmp(scratch5, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dst, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- // Compare to 8, not 4, because we do the subtraction before increasing
- // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
+ Label loop_entry, loop;
+ // Copy bytes from src to dest until dest hits limit.
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
+ __ strb(scratch, MemOperand(dest, 1, PostIndex));
+ __ bind(&loop_entry);
__ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
+ __ b(lt, &loop);
__ bind(&done);
}
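
The rewritten copy loop branches straight to the comparison, so a zero-length copy needs no separate guard. A C++ sketch of the same control flow (not part of this patch), with goto standing in for the b/bind pairs:

    #include <cassert>
    #include <cstring>

    // Mirror of the loop above: jump to the bottom test first, so the body
    // only runs while dest is below limit, including zero iterations.
    void CopyBytes(unsigned char* dest, const unsigned char* src, int count) {
      unsigned char* limit = dest + count;  // "Read until dest equals this."
      goto loop_entry;
    loop:
      *dest++ = *src++;
    loop_entry:
      if (dest < limit) goto loop;
    }

    int main() {
      unsigned char a[4] = {1, 2, 3, 4}, b[4] = {0};
      CopyBytes(b, a, 4);
      assert(std::memcmp(a, b, 4) == 0);
      CopyBytes(b, a, 0);  // zero count: the entry branch skips the body
    }
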
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If the check failed, combine both characters into a single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in the c1 register.
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load symbol table
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ add(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- if (i > 0) {
- __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_symbol_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
- __ b(ne, &is_string);
-
- __ cmp(undefined, candidate);
- __ b(eq, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole");
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_symbol_table);
- __ Move(r0, result);
-}
-
-
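
The removed probe's contract hinged on the little-endian halfword layout called out above (char 1 in byte 0, char 2 in byte 1). A small C++ illustration (not part of this patch; it assumes a little-endian host, as the stub did):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Combine two one-byte chars the way the probe expected them in c1.
    uint16_t CombineTwoChars(uint8_t c1, uint8_t c2) {
      return static_cast<uint16_t>(c1 | (c2 << 8));
    }

    int main() {
      const unsigned char str[2] = {'h', 'i'};
      uint16_t halfword;
      std::memcpy(&halfword, str, 2);  // the ldrh from the candidate string
      assert(halfword == CombineTwoChars('h', 'i'));  // little endian only
    }
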
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, LSR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15));
-
- __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
-}
-
-
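
The three removed GenerateHash* helpers together computed a Jenkins one-at-a-time hash variant. A C++ sketch of the whole pipeline (not part of this patch; the 30-bit mask is an assumption about String::kHashBitMask, and the 27 comes from the kZeroHash comment above):

    #include <cstdint>
    #include <string>

    uint32_t StringHash(const std::string& s, uint32_t seed) {
      uint32_t hash = seed;           // GenerateHashInit starts from the seed
      for (unsigned char c : s) {
        hash += c;                    // GenerateHashAddCharacter
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;              // GenerateHashGetHash
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (1u << 30) - 1;         // keep only the hash bits
      return hash == 0 ? 27 : hash;   // never return a zero hash
    }
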
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -5848,61 +2946,43 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r2, ASR, 1), SetCC);
- __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ // Arithmetic shift right by one un-smi-tags. In this case we rotate right
+ // instead because we bail out on non-smi values: ROR and ASR are equivalent
+ // for smis but they set the flags in a way that's easier to optimize.
+ __ mov(r2, Operand(r2, ROR, 1), SetCC);
+ __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then C is set now, and N
+ // has the same value: we rotated by 1, so the bottom bit is now the top bit.
// We want to bail out to runtime here if "from" is negative. In that case,
// the next instruction is not executed and we fall through to bailing out to
- // runtime. pl is the opposite of mi.
- // Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, pl);
- __ b(mi, &runtime); // Fail if from > to.
+ // runtime.
+ // Executed if both r2 and r3 are untagged integers.
+ __ sub(r2, r2, Operand(r3), SetCC, cc);
+ // One of the above un-smis or the above SUB could have set N==1.
+ __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
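
To make the flag argument above concrete, here is a minimal C++ simulation (not part of this patch) of what MOVS rd, rm, ROR #1 does to C and N; heap object pointers are odd on ARM (kHeapObjectTag), so the rotated-out bit is exactly the smi test:

    #include <cassert>
    #include <cstdint>

    struct Flags { bool n, c; };

    // MOVS rd, rm, ROR #1: C gets the bit rotated out (old bit 0) and N gets
    // the new sign bit, which is that same old bit 0 rotated to the top.
    uint32_t RotateRight1(uint32_t v, Flags* f) {
      uint32_t out = (v >> 1) | (v << 31);
      f->c = (v & 1) != 0;
      f->n = (out >> 31) != 0;
      return out;
    }

    int main() {
      Flags f;
      RotateRight1(6u << 1, &f);        // smi-tagged 6: tag bit is 0
      assert(!f.c && !f.n);
      RotateRight1((6u << 1) | 1, &f);  // heap pointers are odd on ARM
      assert(f.c && f.n);
    }
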
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r0, &runtime);
Condition is_string = masm->IsObjectStringType(r0, r1);
__ b(NegateCondition(is_string), &runtime);
+ Label single_char;
+ __ cmp(r2, Operand(1));
+ __ b(eq, &single_char);
+
// Short-cut for the case of trivial substring.
Label return_r0;
// r0: original string
// r2: result string length
__ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
__ cmp(r2, Operand(r4, ASR, 1));
+ // Return original string.
__ b(eq, &return_r0);
+ // Longer than original string's length or negative: unsafe arguments.
+ __ b(hi, &runtime);
+ // Shorter than original string's length: an actual substring.
- Label result_longer_than_two;
- // Check for special case of two character ASCII string, in which case
- // we do a lookup in the symbol table first.
- __ cmp(r2, Operand(2));
- __ b(gt, &result_longer_than_two);
- __ b(lt, &runtime);
-
- __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
-
- // Get the two characters forming the sub string.
- __ add(r0, r0, Operand(r3));
- __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- __ jmp(&return_r0);
-
- // r2: result string length.
- // r3: two characters combined into halfword in little endian byte order.
- __ bind(&make_two_character_string);
- __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
- __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ jmp(&return_r0);
-
- __ bind(&result_longer_than_two);
// Deal with different string types: update the index if necessary
// and put the underlying string into r5.
// r0: original string
@@ -5920,7 +3000,7 @@
__ b(ne, &sliced_string);
// Cons string. Check whether it is flat, then fetch first part.
__ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(r5, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
// Update instance type.
@@ -5959,14 +3039,14 @@
// string's encoding is wrong because we always have to recheck the encoding
// of the newly created string's parent anyway due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
@@ -5988,7 +3068,7 @@
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(r1, Operand(kShortExternalStringTag));
__ b(ne, &runtime);
__ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
@@ -5997,35 +3077,35 @@
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
__ add(r5, r5, r3);
// Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string
// r1: first character of result string
// r2: result string length
// r5: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(
+ masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
__ jmp(&return_r0);
// Allocate and copy the resulting two-byte string.
__ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -6038,27 +3118,37 @@
// r2: result length.
// r5: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
__ bind(&return_r0);
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Drop(3);
__ Ret();
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // r0: original string
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged)
+ __ SmiTag(r3, r3);
+ StringCharAtGenerator generator(
+ r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Register length = scratch1;
// Compare lengths.
@@ -6075,16 +3165,15 @@
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3,
- &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+ &strings_not_equal);
// Characters are equal.
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
@@ -6092,13 +3181,9 @@
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
@@ -6108,17 +3193,16 @@
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand(0));
+ __ cmp(min_length, Operand::Zero());
__ b(eq, &compare_lengths);
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
__ mov(r0, Operand(length_delta), SetCC);
__ bind(&result_not_equal);
@@ -6130,20 +3214,15 @@
}
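
Functionally, the stub above implements the usual min-length comparison; a C++ sketch of the contract (not part of this patch), relying on Smi::FromInt(EQUAL) == 0 just as the DCHECK does:

    #include <algorithm>
    #include <cstring>

    // Compare the first min(len_l, len_r) bytes; if they all match, the sign
    // of the length difference decides, and zero difference means EQUAL.
    int CompareFlat(const char* l, int len_l, const char* r, int len_r) {
      int min_length = std::min(len_l, len_r);
      int diff = std::memcmp(l, r, static_cast<size_t>(min_length));
      if (diff != 0) return diff < 0 ? -1 : 1;  // LESS / GREATER
      int delta = len_l - len_r;                // length_delta in the stub
      return delta == 0 ? 0 : (delta < 0 ? -1 : 1);
    }
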
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal) {
// Change index to run from -length to -1 by adding length to string
// start. This means that the loop ends when the index reaches zero, which
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
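
A C++ rendering of that indexing trick (not part of this patch): both cursors point one past the last character and the index climbs from -length to zero, so the increment doubles as the loop test:

    #include <cassert>

    bool CharsEqual(const char* left, const char* right, int length) {
      left += length;   // one past the last character of each string
      right += length;
      int index = -length;
      while (index != 0) {                             // ends exactly at zero
        if (left[index] != right[index]) return false;
        ++index;
      }
      return true;
    }

    int main() {
      assert(CharsEqual("abc", "abc", 3));
      assert(!CharsEqual("abc", "abd", 3));
    }
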
@@ -6164,7 +3243,7 @@
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
// Stack frame on entry.
// sp[0]: right string
@@ -6183,13 +3262,13 @@
__ bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first.
+ // Compare flat one-byte strings natively. Remove arguments from stack first.
__ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -6198,345 +3277,39 @@
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : left
+ // -- r0 : right
+ // -- lr : return address
+ // -----------------------------------
- Counters* counters = masm->isolate()->counters();
+ // Load r2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(r2, handle(isolate()->heap()->undefined_value()));
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(r0, r1, &call_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite);
+ __ push(r2);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
+ __ cmp(r2, ip);
+ __ pop(r2);
+ __ Assert(eq, kExpectedAllocationSite);
}
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the symbol table when adding two one-character strings, as it
- // helps later optimizations to return a symbol here.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in r2 register.
- // So we can fill resulting string without two loops by a single
- // halfword store instruction (which assumes that the processor is
- // in little-endian mode).
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(ConsString::kMinLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r6: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
-
- // Check whether both strings have the same encoding.
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &call_runtime);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
- r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &first_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r4, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r1,
- r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &second_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r5, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r7: first character of first string
- // r1: first character of second string
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r5, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
+ __ TailCallStub(&stub);
}
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
- __ b(lt, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
- __ bind(&done);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -6547,7 +3320,7 @@
} else {
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(r1);
- __ sub(r0, r1, SmiUntagOperand(r0));
+ __ sub(r0, r1, Operand::SmiUntag(r0));
}
__ Ret();
@@ -6556,60 +3329,74 @@
}
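
The untag-then-subtract above is safe because two 31-bit smi payloads can never produce a difference that overflows a 32-bit register. A C++ sketch (not part of this patch):

    #include <cassert>
    #include <cstdint>

    // Untagged smi payloads are 31-bit, so their difference always fits in
    // 32 bits; subtracting the tagged values could overflow instead.
    int32_t CompareSmis(int32_t tagged_l, int32_t tagged_r) {
      int32_t l = tagged_l >> 1;  // arithmetic shift drops the tag bit
      int32_t r = tagged_r >> 1;
      return l - r;               // sign and zero are reliable
    }

    int main() {
      assert(CompareSmis(5 * 2, 3 * 2) > 0);
      assert(CompareSmis(-4 * 2, -4 * 2) == 0);
    }
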
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
-
- // Compare operands
- __ VFPCompareAndSetFlags(d0, d1);
-
- // Don't base result on status bits when a NaN is involved.
- __ b(vs, &unordered);
-
- // Return a result of -1, 0, or 1, based on status bits.
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
+ if (left() == CompareICState::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right() == CompareICState::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
}
+ // Inlining the double comparison and falling back to the general compare
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiToDouble(d1, r0);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiToDouble(d0, r1);
+
+ __ bind(&done);
+ // Compare operands.
+ __ VFPCompareAndSetFlags(d0, d1);
+
+ // Don't base result on status bits when a NaN is involved.
+ __ b(vs, &unordered);
+
+ // Return a result of -1, 0, or 1, based on status bits.
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
+
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
__ b(eq, &unordered);
}
@@ -6619,8 +3406,8 @@
}
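
The flag dance above amounts to a three-way compare that refuses to rank NaN; a C++ sketch of the same result selection (not part of this patch):

    #include <cmath>

    // Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER); *unordered mirrors the VS
    // condition, where the stub falls through to the generic compare stub.
    int CompareDoubles(double left, double right, bool* unordered) {
      *unordered = std::isnan(left) || std::isnan(right);
      if (*unordered) return 0;       // result is meaningless; caller bails
      if (left == right) return 0;
      return left < right ? -1 : 1;
    }
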
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -6632,21 +3419,21 @@
// Check that both operands are heap objects.
__ JumpIfEitherSmi(left, right, &miss);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp1, Operand(tmp2));
- __ tst(tmp1, Operand(kIsSymbolMask));
- __ b(eq, &miss);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orr(tmp1, tmp1, Operand(tmp2));
+ __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ b(ne, &miss);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(r0));
+ DCHECK(right.is(r0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@@ -6657,11 +3444,50 @@
}
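
The OR-and-test above works because both tags under test are zero; a C++ sketch of the combined check (not part of this patch; the mask values are illustrative, not quoted from the headers):

    #include <cstdint>

    // With kStringTag == 0 and kInternalizedTag == 0, a value passes only if
    // neither the "not a string" nor the "not internalized" bit is set, so
    // OR-ing the two instance types tests both operands with a single tst.
    constexpr uint32_t kIsNotStringMask = 1u << 7;        // illustrative
    constexpr uint32_t kIsNotInternalizedMask = 1u << 6;  // illustrative

    bool BothInternalizedStrings(uint32_t type_left, uint32_t type_right) {
      return ((type_left | type_right) &
              (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
    }
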
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ DCHECK(GetCondition() == eq);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+ Register tmp1 = r2;
+ Register tmp2 = r3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+
+ // Unique names are compared by identity.
+ __ cmp(left, right);
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = r1;
@@ -6694,31 +3520,32 @@
// Handle not identical strings.
- // Check that both strings are symbols. If they are, we're done
- // because we already know they are not identical.
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
if (equality) {
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsSymbolMask));
+ DCHECK(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ orr(tmp3, tmp1, Operand(tmp2));
+ __ tst(tmp3, Operand(kIsNotInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(r0));
- __ Ret(ne);
+ DCHECK(right.is(r0));
+ __ Ret(eq);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+ tmp3);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
@@ -6735,8 +3562,8 @@
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -6746,7 +3573,7 @@
__ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
__ Ret();
@@ -6755,7 +3582,7 @@
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -6774,26 +3601,23 @@
}
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
- __ push(lr);
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ Push(lr, r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
- __ pop(r0);
- __ pop(r1);
+ __ Pop(r1, r0);
}
__ Jump(r2);
@@ -6801,40 +3625,33 @@
void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ str(lr, MemOperand(sp, 0));
+ __ blx(ip); // Call the C++ function.
+ __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ mov(r2, Operand(function));
- GenerateCall(masm, r2);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET));
- // Push return address (accessible to GC through exit frame pc).
- // Note that using pc with str is deprecated.
- Label start;
- __ bind(&start);
- __ add(ip, pc, Operand(Assembler::kInstrSize));
- __ str(ip, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
- ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
- masm->SizeOfCodeGeneratedSince(&start));
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode().location());
+ __ Move(ip, target);
+ __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ blx(lr); // Call the stub.
}
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -6848,49 +3665,45 @@
__ ldr(index, FieldMemOperand(properties, kCapacityOffset));
__ sub(index, index, Operand(1));
__ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+ Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
+ DCHECK_EQ(kSmiTagSize, 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- ASSERT(!tmp.is(entity_name));
+ DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ b(eq, done);
- if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+ // Load the hole ready for use below:
+ __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss);
+ // Stop if found the property.
+ __ cmp(entity_name, Operand(Handle<Name>(name)));
+ __ b(eq, miss);
- Label the_hole;
- __ cmp(entity_name, tmp);
- __ b(eq, &the_hole);
+ Label good;
+ __ cmp(entity_name, tmp);
+ __ b(eq, &good);
- // Check if the entry name is not a symbol.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsSymbolMask));
- __ b(eq, miss);
+ // Check if the entry name is not a unique name.
+ __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+ __ bind(&good);
- __ bind(&the_hole);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
+ // Restore the properties.
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
const int spill_mask =
@@ -6899,10 +3712,10 @@
__ stm(db_w, sp, spill_mask);
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(r1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ mov(r1, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
@@ -6910,28 +3723,27 @@
}
-// Probe the string dictionary in the |elements| register. Jump to the
+// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If the lookup was successful, |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK(!elements.is(scratch1));
+ DCHECK(!elements.is(scratch2));
+ DCHECK(!name.is(scratch1));
+ DCHECK(!name.is(scratch2));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertName(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
+ __ SmiUntag(scratch1);
__ sub(scratch1, scratch1, Operand(1));
// Generate an unrolled loop that performs a few probes before
@@ -6939,20 +3751,20 @@
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ DCHECK(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ add(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
+ __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
@@ -6970,16 +3782,16 @@
__ stm(db_w, sp, spill_mask);
if (name.is(r0)) {
- ASSERT(!elements.is(r1));
+ DCHECK(!elements.is(r1));
__ Move(r1, name);
__ Move(r0, elements);
} else {
__ Move(r0, elements);
__ Move(r1, name);
}
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
@@ -6988,15 +3800,15 @@
}
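
For orientation, the probing that both lookup paths inline follows this shape; a C++ sketch (not part of this patch; the triangular-number GetProbeOffset is an assumption about NameDictionary's scheme):

    #include <cstdint>

    constexpr uint32_t kEntrySize = 3;  // key, value, details per entry

    // Quadratically growing probe offsets, as in the unrolled loops above.
    uint32_t GetProbeOffset(uint32_t n) { return (n + n * n) >> 1; }

    // Masked index for probe n, scaled to an element index ("index *= 3").
    uint32_t EntryIndex(uint32_t hash, uint32_t n, uint32_t capacity) {
      uint32_t mask = capacity - 1;  // capacity is a power of two
      uint32_t index = (hash + GetProbeOffset(n)) & mask;
      return index * kEntrySize;
    }
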
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
- // result: StringDictionary to probe
+ // result: NameDictionary to probe
// r1: key
- // : StringDictionary to probe.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
+ // dictionary: NameDictionary to probe.
+ // index: will hold the index of the entry if lookup is successful.
+ // might alias with result_.
// Returns:
// result_ is zero if lookup failed, non-zero otherwise.
@@ -7012,10 +3824,10 @@
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
__ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+ __ SmiUntag(mask);
__ sub(mask, mask, Operand(1));
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -7026,20 +3838,20 @@
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following 'and' instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ DCHECK(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ add(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
} else {
__ mov(index, Operand(hash));
}
- __ and_(index, mask, Operand(index, LSR, String::kHashShift));
+ __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
- ASSERT_EQ(kSmiTagSize, 1);
+ DCHECK_EQ(kSmiTagSize, 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -7051,13 +3863,12 @@
__ cmp(entry_key, Operand(key));
__ b(eq, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a symbol.
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ tst(entry_key, Operand(kIsSymbolMask));
- __ b(eq, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -7065,7 +3876,7 @@
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ mov(result, Operand::Zero());
__ Ret();
}
@@ -7080,86 +3891,13 @@
}
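
The unrolled loop above walks the standard NameDictionary quadratic probe sequence: slot_i = (hash + i + i*i) & (capacity - 1). A minimal standalone sketch of the same index computation, assuming a power-of-two capacity and a hash already shifted down by Name::kHashShift (names here are illustrative, not the real V8 API):

#include <cstdint>
#include <cstdio>

// Sketch only: the masked quadratic probe the stub unrolls inline.
// 'capacity' must be a power of two so the mask is equivalent to modulo.
static uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
  uint32_t mask = capacity - 1;
  return (hash + i + i * i) & mask;  // probe offset i + i*i, as in the stub
}

int main() {
  // With kInlinedProbes == 4 the stub checks four slots inline; each entry
  // occupies kEntrySize == 3 words, hence the index *= 3 scaling above.
  for (uint32_t i = 0; i < 4; i++) {
    printf("probe %u -> slot %u\n", static_cast<unsigned>(i),
           static_cast<unsigned>(ProbeIndex(0x1234u, i, 64)));
  }
  return 0;
}
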
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- }
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -7176,14 +3914,16 @@
// forth between a compare instructions (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
+ {
+ // Block literal pool emission, as the position of these two instructions
+ // is assumed by the patching code.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+ }
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
}
__ Ret();
@@ -7196,8 +3936,8 @@
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+ DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+ DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
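
The stub's first two instructions double as a three-state mode flag: each can be either a branch or a tst-immediate (a predictable no-op here), and GetMode in the header decodes the pair. A rough sketch of the opcode-bit flipping on raw instruction words, assuming the B20/B24/B27 constants match the assembler's bit positions:

#include <cstdint>

// Assumed masks, mirroring the Assembler constants the stub relies on.
const uint32_t B20 = 1u << 20;
const uint32_t B24 = 1u << 24;
const uint32_t B27 = 1u << 27;

// Sketch: the in-place rewrites done by PatchBranchIntoNop/PatchNopIntoBranch.
uint32_t BranchToNop(uint32_t instr) { return (instr & ~B27) | (B24 | B20); }
uint32_t NopToBranch(uint32_t instr) { return (instr & ~(B24 | B20)) | B27; }

After rewriting the two words, the real Patch routine also flushes the instruction cache for them (CpuFeatures::FlushICache in the header below).
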
@@ -7206,7 +3946,7 @@
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -7224,12 +3964,9 @@
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -7237,44 +3974,30 @@
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(r0));
+ DCHECK(!address.is(regs_.object()));
+ DCHECK(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
- __ mov(r2, Operand(ExternalReference::isolate_address()));
+ __ Move(r1, address);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -7286,16 +4009,23 @@
Label need_incremental;
Label need_incremental_pop_scratch;
+ __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ ldr(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
+ __ str(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ b(mi, &need_incremental);
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
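
The new prologue above locates the object's MemoryChunk header by masking off the low page bits of the object address, then decrements the write-barrier counter stored at a fixed offset in that header. A hedged sketch of the address arithmetic; the 1 MB page size is an assumption for illustration:

#include <cstdint>

// Assumed: pages are 1 MB aligned, so the mask is page_size - 1.
const uintptr_t kPageAlignmentMask = (1u << 20) - 1;

// Sketch: any interior pointer maps back to its page's header object,
// where the write-barrier counter lives.
uintptr_t ChunkFromAnyPointer(uintptr_t object_address) {
  return object_address & ~kPageAlignmentMask;
}
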
@@ -7336,10 +4066,7 @@
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -7357,10 +4084,10 @@
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : element value to store
- // -- r1 : array literal
- // -- r2 : map of array literal
// -- r3 : element index as smi
- // -- r4 : array literal index in function as smi
+ // -- sp[0] : array literal index in function as smi
+ // -- sp[4] : array literal
+ // clobbers r1, r2, r4
// -----------------------------------
Label element_done;
@@ -7369,10 +4096,15 @@
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+
__ CheckFastElements(r2, r5, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+ __ CheckFastSmiElements(r2, r5, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -7384,10 +4116,10 @@
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r0, MemOperand(r6, 0));
// Update the write barrier for the array store.
@@ -7395,22 +4127,559 @@
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
__ Ret();
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
- &slow_elements);
+ __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
__ Ret();
}
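
Operand::PointerOffsetFromSmiKey folds the smi untag and the pointer-size scaling into a single shift: the integer lives in the upper bits of the smi, so index * kPointerSize is just smi << (kPointerSizeLog2 - kSmiTagSize). A sketch under the 32-bit smi layout used on ARM:

#include <cassert>

const int kSmiTagSize = 1;       // low bit is the tag (0 for smis)
const int kPointerSizeLog2 = 2;  // 4-byte pointers on ARM32

// Sketch: byte offset of element |smi_key| in a pointer-sized array.
int PointerOffsetFromSmiKey(int smi) {
  return smi << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  int smi_3 = 3 << kSmiTagSize;  // the smi encoding of 3
  assert(PointerOffsetFromSmiKey(smi_3) == 3 * 4);
  return 0;
}
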
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ ldr(r1, MemOperand(fp, parameter_count_offset));
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
+ __ add(r1, r1, Operand(1));
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
+ __ add(sp, sp, r1);
+ __ Ret();
+}
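
The trampoline pops the caller's stack parameters by scaling the count saved in the frame into a byte offset; in JS_FUNCTION_STUB_MODE it drops one extra slot for the receiver. The same arithmetic in plain C++, with the parameter names assumed for illustration:

// Sketch only: the sp adjustment performed after LeaveFrame above.
int StackBytesToPop(int parameter_count, bool is_js_function_stub_mode) {
  const int kPointerSize = 4;  // ARM32
  if (is_js_function_stub_mode) parameter_count += 1;  // receiver slot
  return parameter_count * kPointerSize;  // r1 << kPointerSizeLog2 above
}
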
+
+
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub(masm->isolate());
+ int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
+ PredictableCodeSizeScope predictable(masm, code_size);
+ __ push(lr);
+ __ CallStub(&stub);
+ __ pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push lr" instruction, followed by a call.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ 3 * Assembler::kInstrSize;
+
+ // This should contain all kCallerSaved registers.
+ const RegList kSavedRegs =
+ 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 5 | // r5
+ 1 << 9; // r9
+ // We also save lr, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = 7;
+
+ DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ stm(db_w, sp, kSavedRegs | lr.bit());
+
+ // Compute the function's address for the first argument.
+ __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(r5, sp);
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ __ and_(sp, sp, Operand(-frame_alignment));
+ }
+
+#if V8_HOST_ARCH_ARM
+ int32_t entry_hook =
+ reinterpret_cast<int32_t>(isolate()->function_entry_hook());
+ __ mov(ip, Operand(entry_hook));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter.
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ mov(ip, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ isolate())));
+#endif
+ __ Call(ip);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, r5);
+ }
+
+ // Also pop pc to get Ret(0).
+ __ ldm(ia_w, sp, kSavedRegs | pc.bit());
+}
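
The and_(sp, sp, Operand(-frame_alignment)) trick works because, for a power-of-two alignment, -alignment is a mask with the low bits clear; the old sp is kept in r5 so it can be restored after the call. The same rounding-down as a sketch (an 8-byte ABI alignment is a typical assumption):

#include <cstdint>

// Sketch: round an address down to a power-of-two boundary, as the stub
// does to sp before calling out to C.
uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  // ~(alignment - 1) is the same mask as two's-complement -alignment.
  return sp & ~(alignment - 1);
}
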
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
+ }
+
+ // If we reached this point, there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // r0 - number of arguments
+ // r1 - constructor?
+ // sp[0] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ DCHECK(FAST_SMI_ELEMENTS == 0);
+ DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+ DCHECK(FAST_ELEMENTS == 2);
+ DCHECK(FAST_HOLEY_ELEMENTS == 3);
+ DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+ DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, we are holey and that is good.
+ __ tst(r3, Operand(1));
+ __ b(ne, &normal_sequence);
+ }
+
+ // Look at the first argument.
+ __ ldr(r5, MemOperand(sp, 0));
+ __ cmp(r5, Operand::Zero());
+ __ b(eq, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ add(r3, r3, Operand(1));
+
+ if (FLAG_debug_code) {
+ __ ldr(r5, FieldMemOperand(r2, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store r3
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; the upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+ __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
+ }
+
+ // If we reached this point, there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
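
The DCHECKs above pin down the ordering this code depends on: every packed fast kind sits at an even value with its holey variant one above it, so the low bit answers "already holey?" and adding kFastElementsKindPackedToHoley (1) performs the packed-to-holey transition. A hedged enum sketch of that layout:

// Sketch of the assumed ordering; values match the DCHECKs above.
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }

ElementsKind ToHoley(ElementsKind kind) {
  // Setting the low bit equals +1 for the (even) packed kinds.
  return static_cast<ElementsKind>(kind | 1);
}
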
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things.
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count() == ANY) {
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+    __ b(ne, &not_zero_case);
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+    __ bind(&not_zero_case);
+    __ cmp(r0, Operand(1));
+    __ b(gt, &not_one_case);
+    CreateArrayDispatchOneArgument(masm, mode);
+
+    __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count() == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count() == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count() == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc (only if argument_count() == ANY)
+ // -- r1 : constructor
+ // -- r2 : AllocationSite or undefined
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ tst(r4, Operand(kSmiTagMask));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(r4, r4, r5, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in r2 or a valid AllocationSite.
+ __ AssertUndefinedOrAllocationSite(r2, r4);
+ }
+
+ Label no_info;
+ // Get the elements kind and dispatch on that.
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &no_info);
+
+ __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ __ cmp(r0, Operand(1));
+
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lo);
+
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, hi);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array;
+ // look at the first argument.
+ __ ldr(r3, MemOperand(sp, 0));
+ __ cmp(r3, Operand::Zero());
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne);
+ }
+
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+}
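
GenerateCase dispatches on the argument count with one cmp r0, #1 and three conditional tail calls: lo (argc < 1) takes the no-argument stub, hi (argc > 1) the N-argument stub, and the fall-through path handles exactly one argument (with a holey detour if the length is non-zero). The same decision tree as a sketch:

// Sketch: the dispatch the stub encodes with ARM condition codes.
enum Path { kNoArgument, kSingleArgument, kNArguments };

Path Dispatch(unsigned argc) {
  if (argc < 1) return kNoArgument;  // TailCallStub(&stub0, lo)
  if (argc > 1) return kNArguments;  // TailCallStub(&stubN, hi)
  return kSingleArgument;            // falls through to stub1
}
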
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- r1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into r3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r3);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
+ __ Assert(eq,
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : callee
+ // -- r4 : call_data
+ // -- r2 : holder
+ // -- r1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = r0;
+ Register call_data = r4;
+ Register holder = r2;
+ Register api_function_address = r1;
+ Register context = cp;
+
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ // isolate
+ __ mov(scratch,
+ Operand(ExternalReference::isolate_address(isolate())));
+ __ push(scratch);
+ // holder
+ __ push(holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the FunctionCallbackInfo structure in the arguments' space, since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
+ // r0 = FunctionCallbackInfo&
+ // The arguments are just after the return address.
+ __ add(r0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ str(scratch, MemOperand(r0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ mov(ip, Operand(argc));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ mov(ip, Operand::Zero());
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
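
The pushes above build the FunctionCallbackInfo implicit-args block in reverse index order, so sp ends up pointing at the holder (index 0) and the JS arguments sit above the block. A layout sketch matching the STATIC_ASSERTed indices, purely for illustration:

// Sketch of the implicit-args block, lowest address (sp) first.
// Field order matches the FCA::k*Index constants asserted above.
struct ImplicitArgs {
  void* holder;                // index 0, pushed last
  void* isolate;               // index 1
  void* return_value_default;  // index 2
  void* return_value;          // index 3
  void* call_data;             // index 4
  void* callee;                // index 5
  void* context_save;          // index 6, pushed first
};  // kArgsLength == 7 slots; the argc JS arguments follow above this block.
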
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength * 4] : PropertyCallbackArguments object
+ // -- ...
+ // -- r2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(r2));
+
+ __ mov(r0, sp); // r0 = Handle<Name>
+ __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // r1 (internal::Object** args_) as the data.
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 38ed476..727bb1b 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -1,497 +1,127 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
-#include "ic-inl.h"
-
namespace v8 {
namespace internal {
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public CodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public CodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp3_ = CpuFeatures::IsSupported(VFP3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_vfp3_(VFP3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_vfp3_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | VFP3Bits::encode(use_vfp3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii);
+ String::Encoding encoding);
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
+ // Compares two flat one-byte strings and returns result in r0.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public CodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in r0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
+ // Compares two flat one-byte strings for equality and returns result in r0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
Register scratch1,
Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in r0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
+ Register scratch3);
private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal);
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
+ WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+ Register the_heap_number, Register scratch)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = IntRegisterBits::encode(the_int.code()) |
+ HeapNumberRegisterBits::encode(the_heap_number.code()) |
+ ScratchRegisterBits::encode(scratch.code());
+ }
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
+ Register the_int() const {
+ return Register::from_code(IntRegisterBits::decode(minor_key_));
+ }
+
+ Register the_heap_number() const {
+ return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+ }
+
+ Register scratch() const {
+ return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+ }
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};
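
With the move to PlatformCodeStub, the stub's registers are no longer held in member fields; they are packed into minor_key_ through 4-bit BitField slices and decoded on demand by the accessors above. A minimal standalone sketch of that encode/decode pattern, not the real V8 template:

#include <cstdint>

// Sketch: each field owns bits [shift, shift + size) of a 32-bit key.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

using IntRegisterBits = BitField<int, 0, 4>;
using HeapNumberRegisterBits = BitField<int, 4, 4>;
using ScratchRegisterBits = BitField<int, 8, 4>;

// Usage: key = IntRegisterBits::encode(1) | HeapNumberRegisterBits::encode(2)
//            | ScratchRegisterBits::encode(3);
// then IntRegisterBits::decode(key) == 1, and so on.
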
-class NumberToStringStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RecordWriteStub: public CodeStub {
- public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
+ : PlatformCodeStub(isolate),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
- ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+ DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
- ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+ DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
@@ -503,13 +133,13 @@
return INCREMENTAL;
}
- ASSERT(Assembler::IsTstImmediate(first_instruction));
+ DCHECK(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
- ASSERT(Assembler::IsTstImmediate(second_instruction));
+ DCHECK(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
@@ -520,24 +150,27 @@
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
+ DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+ DCHECK(GetMode(stub) == mode);
+ CpuFeatures::FlushICache(stub->instruction_start(),
+ 2 * Assembler::kInstrSize);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
@@ -550,12 +183,12 @@
: object_(object),
address_(address),
scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ DCHECK(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
@@ -571,30 +204,14 @@
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- masm->sub(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
- // Save all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
+ masm->SaveFPRegs(sp, scratch0_);
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- // Restore all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- masm->add(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ masm->RestoreFPRegs(sp, scratch0_);
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
@@ -610,19 +227,6 @@
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
@@ -631,58 +235,50 @@
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- void Generate(MacroAssembler* masm);
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
+ void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
+ }
+
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
+ }
+
class ObjectBits: public BitField<int, 0, 4> {};
class ValueBits: public BitField<int, 4, 4> {};
class AddressBits: public BitField<int, 8, 4> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
-};
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
@@ -691,177 +287,34 @@
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such a stub early enough so it can never be
// moved by GC.
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
- DirectCEntryStub() {}
- void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ExternalReference function);
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
-
bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kVFPRegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- SwVfpRegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Generate non VFP3 code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-class StringDictionaryLookupStub: public CodeStub {
+class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
- Handle<String> name,
+ Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@@ -879,25 +332,21 @@
static const int kTotalProbes = 20;
static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return LookupModeBits::encode(mode_);
- }
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index befd8f2..d050399 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,71 +1,369 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "codegen.h"
-#include "macro-assembler.h"
+#include "src/arm/simulator-arm.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
+ fast_exp_arm_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ DwVfpRegister input = d0;
+ DwVfpRegister result = d1;
+ DwVfpRegister double_scratch1 = d2;
+ DwVfpRegister double_scratch2 = d3;
+ Register temp1 = r4;
+ Register temp2 = r5;
+ Register temp3 = r6;
+
+ if (masm.use_eabi_hardfloat()) {
+ // Input value is in d0 anyway, nothing to do.
+ } else {
+ __ vmov(input, r0, r1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (masm.use_eabi_hardfloat()) {
+ __ vmov(d0, result);
+ } else {
+ __ vmov(r0, r1, result);
+ }
+ __ Ret();
}
- return NULL;
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_arm_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
}
+#if defined(V8_HOST_ARCH_ARM)
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ Register temp1 = r3;
+ Label less_4;
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
+ Label size_less_than_8;
+ __ pld(MemOperand(src, 0));
+
+ __ cmp(chars, Operand(8));
+ __ b(lt, &size_less_than_8);
+ __ cmp(chars, Operand(32));
+ __ b(lt, &less_32);
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 32));
+ }
+ __ cmp(chars, Operand(64));
+ __ b(lt, &less_64);
+ __ pld(MemOperand(src, 64));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 96));
+ }
+ __ cmp(chars, Operand(128));
+ __ b(lt, &less_128);
+ __ pld(MemOperand(src, 128));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 160));
+ }
+ __ pld(MemOperand(src, 192));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 224));
+ }
+ __ cmp(chars, Operand(256));
+ __ b(lt, &less_256);
+ __ sub(chars, chars, Operand(256));
+
+ __ bind(&loop);
+ __ pld(MemOperand(src, 256));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 256));
+ }
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(64), SetCC);
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ b(ge, &loop);
+ __ add(chars, chars, Operand(256));
+
+ __ bind(&less_256);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(128));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ cmp(chars, Operand(64));
+ __ b(lt, &less_64);
+
+ __ bind(&less_128);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(64));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+
+ __ bind(&less_64);
+ __ cmp(chars, Operand(32));
+ __ b(lt, &less_32);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(32));
+
+ __ bind(&less_32);
+ __ cmp(chars, Operand(16));
+ __ b(le, &_16_or_less);
+ __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(16));
+
+ __ bind(&_16_or_less);
+ __ cmp(chars, Operand(8));
+ __ b(le, &_8_or_less);
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(8));
+
+ // Do a last copy which may overlap with the previous copy (up to 8 bytes).
+ __ bind(&_8_or_less);
+ __ rsb(chars, chars, Operand(8));
+ __ sub(src, src, Operand(chars));
+ __ sub(dest, dest, Operand(chars));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+ __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
+
+ __ Ret();
+
+ __ bind(&size_less_than_8);
+
+ __ bic(temp1, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ } else {
+ Register temp2 = ip;
+ Label loop;
+
+ __ bic(temp2, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ add(temp2, dest, temp2);
+
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+ }
+
+ __ bind(&less_4);
+ __ mov(chars, Operand(chars, LSL, 31), SetCC);
+ // bit0 => Z (ne), bit1 => C (cs)
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
+ __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strb(temp1, MemOperand(dest), ne);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<MemCopyUint8Function>(buffer);
+#endif
+}
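
The NEON copy above drains the count through shrinking power-of-two blocks and
finishes with one 8-byte copy that deliberately overlaps the previous one: both
pointers are rewound so the final load/store ends exactly at the end of the
buffer. A hedged C++ sketch of just that tail step (names are illustrative; it
assumes at least 8 bytes were copied before the tail, which the
size_less_than_8 branch guarantees):

#include <cstdint>
#include <cstring>

// Final overlapping tail of the NEON path, with 0 < remaining <= 8.
static void CopyOverlappingTail(uint8_t* dest, const uint8_t* src,
                                size_t remaining) {
  size_t rewind = 8 - remaining;  // rsb chars, chars, #8
  src -= rewind;                  // sub src, src, chars
  dest -= rewind;                 // sub dest, dest, chars
  memcpy(dest, src, 8);           // vld1/vst1 of a single d-register
}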
+
+
+// Convert 8 to 16. The number of characters to copy must be at least 8.
+MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ MemCopyUint16Uint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ if (CpuFeatures::IsSupported(NEON)) {
+ Register temp = r3;
+ Label loop;
+
+ __ bic(temp, chars, Operand(0x7));
+ __ sub(chars, chars, Operand(temp));
+ __ add(temp, dest, Operand(temp, LSL, 1));
+
+ __ bind(&loop);
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+ __ vmovl(NeonU8, q0, d0);
+ __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+ __ cmp(dest, temp);
+ __ b(&loop, ne);
+
+ // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
+ __ rsb(chars, chars, Operand(8));
+ __ sub(src, src, Operand(chars));
+ __ sub(dest, dest, Operand(chars, LSL, 1));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+ __ vmovl(NeonU8, q0, d0);
+ __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
+ __ Ret();
+ } else {
+ Register temp1 = r3;
+ Register temp2 = ip;
+ Register temp3 = lr;
+ Register temp4 = r4;
+ Label loop;
+ Label not_two;
+
+ __ Push(lr, r4);
+ __ bic(temp2, chars, Operand(0x3));
+ __ add(temp2, dest, Operand(temp2, LSL, 1));
+
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ uxtb16(temp3, Operand(temp1, ROR, 0));
+ __ uxtb16(temp4, Operand(temp1, ROR, 8));
+ __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
+ __ str(temp1, MemOperand(dest));
+ __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
+ __ str(temp1, MemOperand(dest, 4));
+ __ add(dest, dest, Operand(8));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+
+ __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
+ __ b(¬_two, cc);
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex));
+ __ uxtb(temp3, Operand(temp1, ROR, 8));
+ __ mov(temp3, Operand(temp3, LSL, 16));
+ __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+ __ str(temp3, MemOperand(dest, 4, PostIndex));
+ __ bind(¬_two);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strh(temp1, MemOperand(dest), ne);
+ __ Pop(pc, r4);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+
+ return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
+#endif
+}
+#endif
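
Both paths of CreateMemCopyUint16Uint8Function implement the same widening
copy: each one-byte (Latin-1) character is zero-extended to a two-byte
character, eight at a time with vmovl on NEON or four at a time with
uxtb16/pkhbt otherwise. Its scalar meaning, as a sketch:

#include <cstddef>
#include <cstdint>

// Reference semantics of the 8-to-16 copy: plain zero extension.
static void WidenUint8ToUint16(uint16_t* dest, const uint8_t* src,
                               size_t chars) {
  for (size_t i = 0; i < chars; i++) {
    dest[i] = src[i];  // implicit zero extension, as vmovl(NeonU8) does
  }
}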
UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ __ MovFromFloatParameter(d0);
+ __ vsqrt(d0, d0);
+ __ MovToFloatResult(d0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
}
+#undef __
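
A hypothetical call site for the generator above; whether the pointer targets
the generated stub or std::sqrt (under the simulator), it is invoked the same
way:

UnaryMathFunction fast_sqrt = CreateSqrtFunction();
double root = fast_sqrt(2.0);  // ~1.4142135623730951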
+
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
+ DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
+ DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
@@ -73,21 +371,31 @@
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register target_map,
+ AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ Register scratch_elements = r4;
+ DCHECK(!AreAliased(receiver, key, value, target_map,
+ scratch_elements));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ DCHECK(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(
+ receiver, scratch_elements, allocation_memento_found);
+ }
+
// Set transitioned map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
+ __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
HeapObject::kMapOffset,
- r3,
+ target_map,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -96,84 +404,105 @@
}
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
- MacroAssembler* masm, Label* fail) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register target_map,
+ AllocationSiteMode mode,
+ Label* fail) {
+ // Register lr contains the return address.
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+ Register elements = r4;
+ Register length = r5;
+ Register array = r6;
+ Register array_end = array;
+
+ // target_map parameter can be clobbered.
+ Register scratch1 = target_map;
+ Register scratch2 = r9;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map,
+ elements, length, array, scratch2));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+ }
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
- __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
- // r5: number of elements (smi-tagged)
+ __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
- __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
- __ add(lr, lr, Operand(r5, LSL, 2));
- __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
- // r6: destination FixedDoubleArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
- // Update receiver's map.
+ // Use lr as a temporary register.
+ __ mov(lr, Operand(length, LSL, 2));
+ __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+ // array: destination FixedDoubleArray, not tagged as heap object.
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // r4: source FixedArray.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ // Update receiver's map.
+ __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
+
+ __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
HeapObject::kMapOffset,
- r3,
- r9,
+ target_map,
+ scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
- __ add(r3, r6, Operand(kHeapObjectTag));
- __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ RecordWriteField(r2,
+ __ add(scratch1, array, Operand(kHeapObjectTag));
+ __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver,
JSObject::kElementsOffset,
- r3,
- r9,
+ scratch1,
+ scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
- __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
- __ mov(r4, Operand(kHoleNanLower32));
- __ mov(r5, Operand(kHoleNanUpper32));
- // r3: begin of source FixedArray element fields, not tagged
- // r4: kHoleNanLower32
- // r5: kHoleNanUpper32
- // r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp3_supported) __ Push(r1, r0);
+ __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(array_end, scratch2, Operand(length, LSL, 2));
+
+ // Repurpose registers no longer in use.
+ Register hole_lower = elements;
+ Register hole_upper = length;
+
+ __ mov(hole_lower, Operand(kHoleNanLower32));
+ __ mov(hole_upper, Operand(kHoleNanUpper32));
+ // scratch1: begin of source FixedArray element fields, not tagged
+ // hole_lower: kHoleNanLower32
+ // hole_upper: kHoleNanUpper32
+ // array_end: end of destination FixedDoubleArray, not tagged
+ // scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
__ bind(&only_change_map);
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
+ __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasBeenSaved,
+ target_map,
+ scratch2,
+ kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -186,124 +515,136 @@
// Convert and copy elements.
__ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+ __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
+ // lr: current element
+ __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
- if (vfp3_supported) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r9);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- r9,
- FloatingPointHelper::kCoreRegisters,
- d0,
- r0,
- r1,
- lr,
- s0);
- __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
- }
+ __ vmov(s0, lr);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, scratch2, 0);
+ __ add(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array");
+ __ SmiTag(lr);
+ __ orr(lr, lr, Operand(1));
+ __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, kObjectFoundInSmiOnlyArray);
}
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+ __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
__ bind(&entry);
- __ cmp(r7, r6);
+ __ cmp(scratch2, array_end);
__ b(lt, &loop);
- if (!vfp3_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
+ MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register target_map,
+ AllocationSiteMode mode,
+ Label* fail) {
+ // Register lr contains the return address.
Label entry, loop, convert_hole, gc_required, only_change_map;
+ Register elements = r4;
+ Register array = r6;
+ Register length = r5;
+ Register scratch = r9;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map,
+ elements, array, length, scratch));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+ }
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
- __ Push(r3, r2, r1, r0);
- __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedDoubleArray
- // r5: number of elements (smi-tagged)
+ __ Push(target_map, receiver, key, value);
+ __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // elements: source FixedDoubleArray
+ // length: number of elements (smi-tagged)
// Allocate new FixedArray.
- __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r0, r0, Operand(r5, LSL, 1));
- __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
- // r6: destination FixedArray, not tagged as heap object
+ // Re-use value and target_map registers, as they have been saved on the
+ // stack.
+ Register array_size = value;
+ Register allocate_scratch = target_map;
+ __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(array_size, array_size, Operand(length, LSL, 1));
+ __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+ NO_ALLOCATION_FLAGS);
+ // array: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
- __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ str(scratch, MemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
- __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
- __ add(r3, r6, Operand(FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(kHeapObjectTag));
- __ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses in r4 to fully take advantage of post-indexing.
- // r3: begin of destination FixedArray element fields, not tagged
- // r4: begin of source FixedDoubleArray element fields, not tagged, +4
- // r5: end of destination FixedArray, not tagged
- // r6: destination FixedArray
- // r7: the-hole pointer
- // r9: heap number map
+ Register src_elements = elements;
+ Register dst_elements = target_map;
+ Register dst_end = length;
+ Register heap_number_map = scratch;
+ __ add(src_elements, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
+ __ add(array, array, Operand(kHeapObjectTag));
+ __ add(dst_end, dst_elements, Operand(length, LSL, 1));
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ // Using offset addresses in src_elements to fully take advantage of
+ // post-indexing.
+ // dst_elements: begin of destination FixedArray element fields, not tagged
+ // src_elements: begin of source FixedDoubleArray element fields,
+ // not tagged, +4
+ // dst_end: end of destination FixedArray, not tagged
+ // array: destination FixedArray
+ // heap_number_map: heap number map
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ Pop(r3, r2, r1, r0);
+ __ Pop(target_map, receiver, key, value);
__ pop(lr);
__ b(fail);
__ bind(&loop);
- __ ldr(r1, MemOperand(r4, 8, PostIndex));
- // lr: current element's upper 32 bit
- // r4: address of next element's upper 32 bit
- __ cmp(r1, Operand(kHoleNanUpper32));
+ Register upper_bits = key;
+ __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
+ // upper_bits: current element's upper 32 bit
+ // src_elements: address of next element's upper 32 bit
+ __ cmp(upper_bits, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
- // r2: new heap number
- __ ldr(r0, MemOperand(r4, 12, NegOffset));
- __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
- __ mov(r0, r3);
- __ str(r2, MemOperand(r3, 4, PostIndex));
- __ RecordWrite(r6,
- r0,
- r2,
+ Register heap_number = receiver;
+ Register scratch2 = value;
+ __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
+ &gc_required);
+ // heap_number: new heap number
+ __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
+ __ Strd(scratch2, upper_bits,
+ FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+ __ mov(scratch2, dst_elements);
+ __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
+ __ RecordWrite(array,
+ scratch2,
+ heap_number,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -312,19 +653,20 @@
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
+ __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
__ bind(&entry);
- __ cmp(r3, r5);
+ __ cmp(dst_elements, dst_end);
__ b(lt, &loop);
- __ Pop(r3, r2, r1, r0);
+ __ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
- __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ RecordWriteField(r2,
+ __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver,
JSObject::kElementsOffset,
- r6,
- r9,
+ array,
+ scratch,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -333,11 +675,11 @@
__ bind(&only_change_map);
// Update receiver's map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
+ __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
HeapObject::kMapOffset,
- r3,
- r9,
+ target_map,
+ scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -368,7 +710,7 @@
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
+ __ add(index, index, Operand::SmiUntag(result));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -378,7 +720,7 @@
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(result, Heap::kempty_stringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
@@ -397,7 +739,7 @@
__ b(ne, &external_string);
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -409,30 +751,187 @@
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
+ __ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(result, Operand(kShortExternalStringMask));
__ b(ne, call_runtime);
__ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
- Label ascii, done;
+ Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii);
+ __ b(ne, &one_byte);
// Two-byte string.
__ ldrh(result, MemOperand(string, index, LSL, 1));
__ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
+ __ bind(&one_byte);
+ // One-byte string.
__ ldrb(result, MemOperand(string, index));
__ bind(&done);
}
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ DCHECK(!input.is(result));
+ DCHECK(!input.is(double_scratch1));
+ DCHECK(!input.is(double_scratch2));
+ DCHECK(!result.is(double_scratch1));
+ DCHECK(!result.is(double_scratch2));
+ DCHECK(!double_scratch1.is(double_scratch2));
+ DCHECK(!temp1.is(temp2));
+ DCHECK(!temp1.is(temp3));
+ DCHECK(!temp2.is(temp3));
+ DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+ DCHECK(!masm->serializer_enabled()); // External references not serializable.
+
+ Label zero, infinity, done;
+
+ __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ vldr(double_scratch1, ExpConstant(0, temp3));
+ __ VFPCompareAndSetFlags(double_scratch1, input);
+ __ b(ge, &zero);
+
+ __ vldr(double_scratch2, ExpConstant(1, temp3));
+ __ VFPCompareAndSetFlags(input, double_scratch2);
+ __ b(ge, &infinity);
+
+ __ vldr(double_scratch1, ExpConstant(3, temp3));
+ __ vldr(result, ExpConstant(4, temp3));
+ __ vmul(double_scratch1, double_scratch1, input);
+ __ vadd(double_scratch1, double_scratch1, result);
+ __ VmovLow(temp2, double_scratch1);
+ __ vsub(double_scratch1, double_scratch1, result);
+ __ vldr(result, ExpConstant(6, temp3));
+ __ vldr(double_scratch2, ExpConstant(5, temp3));
+ __ vmul(double_scratch1, double_scratch1, double_scratch2);
+ __ vsub(double_scratch1, double_scratch1, input);
+ __ vsub(result, result, double_scratch1);
+ __ vmul(double_scratch2, double_scratch1, double_scratch1);
+ __ vmul(result, result, double_scratch2);
+ __ vldr(double_scratch2, ExpConstant(7, temp3));
+ __ vmul(result, result, double_scratch2);
+ __ vsub(result, result, double_scratch1);
+ // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
+ DCHECK(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ vmov(double_scratch2, 1);
+ __ vadd(result, result, double_scratch2);
+ __ mov(temp1, Operand(temp2, LSR, 11));
+ __ Ubfx(temp2, temp2, 0, 11);
+ __ add(temp1, temp1, Operand(0x3ff));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ add(temp3, temp3, Operand(temp2, LSL, 3));
+ __ ldm(ia, temp3, temp2.bit() | temp3.bit());
+ // The first word loaded ends up in the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ orr(temp1, temp3, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp2, temp1);
+ } else {
+ __ orr(temp1, temp2, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp3, temp1);
+ }
+ __ vmul(result, result, double_scratch1);
+ __ b(&done);
+
+ __ bind(&zero);
+ __ vmov(result, kDoubleRegZero);
+ __ b(&done);
+
+ __ bind(&infinity);
+ __ vldr(result, ExpConstant(2, temp3));
+
+ __ bind(&done);
+}
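
EmitMathExp appears to follow the classical table-driven scheme: write
x = (m + j/2048) * ln 2 + r, so exp(x) = 2^m * 2^(j/2048) * exp(r). The
2048-entry log table supplies 2^(j/2048) (Ubfx extracts the 11-bit index j),
and m lands directly in the IEEE-754 exponent field, which is what the orr at
LSL 20 into the high word accomplishes. A rough scalar sketch under those
assumptions (the real polynomial and constant values are omitted):

#include <cmath>
#include <cstdint>
#include <cstring>

// Sketch only: table[j] is assumed to hold 2^(j/2048), a value in [1, 2).
static double ExpSketch(double x, const double table[2048]) {
  double n = std::round(x * (2048.0 / std::log(2.0)));
  int64_t ni = static_cast<int64_t>(n);
  int64_t m = ni >> 11;    // integer exponent (temp1 before the 0x3ff bias)
  int64_t j = ni & 0x7ff;  // 11-bit table index (Ubfx temp2, temp2, 0, 11)
  double r = x - n * (std::log(2.0) / 2048.0);
  double poly = 1.0 + r + 0.5 * r * r;  // placeholder polynomial in exp(r)
  uint64_t bits;
  std::memcpy(&bits, &table[j], sizeof(bits));
  bits += static_cast<uint64_t>(m) << 52;  // add m to the biased exponent
  double scaled;
  std::memcpy(&scaled, &bits, sizeof(scaled));
  return scaled * poly;
}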
+
#undef __
+#ifdef DEBUG
+// add(r0, pc, Operand(-8))
+static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+#endif
+
+CodeAgingHelper::CodeAgingHelper() {
+ DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // Since patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+ // the process, before the ARM simulator ICache is set up.
+ SmartPointer<CodePatcher> patcher(
+ new CodePatcher(young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->PushFixedFrame(r1);
+ patcher->masm()->nop(ip.code());
+ patcher->masm()->add(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+ if (age == kNoAgeCodeAge) {
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+ CpuFeatures::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ patcher.masm()->add(r0, pc, Operand(-8));
+ patcher.masm()->ldr(pc, MemOperand(pc, -4));
+ patcher.masm()->emit_code_stub_address(stub);
+ }
+}
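
The aged prologue written above is three words: add r0, pc, #-8 leaves the
address of the sequence itself in r0 (ARM's pc reads as the current instruction
plus 8), ldr pc, [pc, #-4] jumps through the literal that follows, and
emit_code_stub_address plants the stub's entry point as that literal. A
hypothetical recognizer mirroring CodeAgingHelper::IsOld:

#include <cstdint>
#include <cstring>

// An aged sequence is identified purely by its first patched word,
// the encoding of "add r0, pc, #-8".
static bool LooksAged(const uint8_t* sequence) {
  const uint32_t kFirstPatchedInstruction = 0xe24f0008;
  uint32_t first;
  std::memcpy(&first, sequence, sizeof(first));
  return first == kFirstPatchedInstruction;
}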
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index c340e6b..4c7c768 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,77 +1,19 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
@@ -88,6 +30,23 @@
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ // Register input isn't modified. All other registers are clobbered.
+ static void EmitMathExp(MacroAssembler* masm,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index bf9da23..3f3c5ed 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -1,35 +1,12 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "constants-arm.h"
+#include "src/arm/constants-arm.h"
namespace v8 {
@@ -87,8 +64,8 @@
}
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2"
+// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
+// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
@@ -97,12 +74,14 @@
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
};
const char* VFPRegisters::Name(int reg, bool is_double) {
- ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ DCHECK((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e767001..375ef89 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
@@ -33,70 +10,32 @@
#error ARM EABI support is required.
#endif
-// This means that interwork-compatible jump instructions are generated. We
-// want to generate them on the simulator too so it makes snapshots that can
-// be used on real hardware.
-#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
-# define USE_THUMB_INTERWORK 1
-#endif
-
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || \
- defined(__ARM_ARCH_6T2__) || \
- defined(CAN_USE_ARMV7_INSTRUCTIONS)
-# define CAN_USE_ARMV6_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(CAN_USE_ARMV6_INSTRUCTIONS)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-#endif
-
-// Simulator should support ARM5 instructions and unaligned access by default.
-#if !defined(__arm__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-# define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-
-#endif
-
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-
-// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
-#define USE_BLX 1
-#endif
-
namespace v8 {
namespace internal {
// Constant pool marker.
-const int kConstantPoolMarkerMask = 0xffe00000;
-const int kConstantPoolMarker = 0x0c000000;
-const int kConstantPoolLengthMask = 0x001ffff;
+// Use UDF, the permanently undefined instruction.
+const int kConstantPoolMarkerMask = 0xfff000f0;
+const int kConstantPoolMarker = 0xe7f000f0;
+const int kConstantPoolLengthMaxMask = 0xffff;
+inline int EncodeConstantPoolLength(int length) {
+ DCHECK((length & kConstantPoolLengthMaxMask) == length);
+ return ((length & 0xfff0) << 4) | (length & 0xf);
+}
+inline int DecodeConstantPoolLength(int instr) {
+ DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+ return ((instr >> 4) & 0xfff0) | (instr & 0xf);
+}
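
The UDF encoding 0xe7f000f0 leaves exactly two immediate fields free, bits
8..19 and bits 0..3, so a 16-bit pool length is split across them: the upper
twelve bits shift up past the fixed 0xf0 nibble while the low nibble stays put.
A self-contained round trip of the encoding above:

#include <cassert>

inline int EncodeLen(int length) {
  return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeLen(int instr) {
  return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}

int main() {
  // 0xabcd encodes to 0xabc0d: bits 8..19 hold 0xabc, bits 0..3 hold 0xd,
  // and bits 4..7 stay clear for the instruction's fixed 0xf0 nibble.
  assert(EncodeLen(0xabcd) == 0xabc0d);
  for (int len = 0; len <= 0xffff; len++) {
    assert(DecodeLen(EncodeLen(len)) == len);
  }
  return 0;
}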
+
+// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
+const int kCodeAgeJumpInstruction = 0xe51ff004;
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
// VFP support.
const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPDoubleRegisters = 32;
const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
@@ -145,13 +84,13 @@
inline Condition NegateCondition(Condition cond) {
- ASSERT(cond != al);
+ DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cond) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
@@ -171,7 +110,7 @@
return ge;
default:
return cond;
- };
+ }
}
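
Renaming ReverseCondition to CommuteCondition pins down the meaning: it is the
condition to use after swapping the comparison's operands, not the negation. A
small illustration of the invariant:

#include <cassert>

// For unsigned values, "a < b" (condition lo after cmp a, b) must become
// "b > a" (condition hi after cmp b, a): CommuteCondition(lo) == hi.
int main() {
  unsigned a = 3, b = 7;
  assert((a < b) == (b > a));  // lo commutes to hi, never to hs/ge
  return 0;
}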
@@ -261,7 +200,10 @@
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
- kOff12Mask = (1 << 12) - 1
+ kImm16Mask = (1 << 16) - 1,
+ kImm8Mask = (1 << 8) - 1,
+ kOff12Mask = (1 << 12) - 1,
+ kOff8Mask = (1 << 8) - 1
};
@@ -355,6 +297,32 @@
};
+// NEON data type
+enum NeonDataType {
+ NeonS8 = 0x1, // U = 0, imm3 = 0b001
+ NeonS16 = 0x2, // U = 0, imm3 = 0b010
+ NeonS32 = 0x4, // U = 0, imm3 = 0b100
+ NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
+ NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
+ NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
+ NeonDataTypeSizeMask = 0x7,
+ NeonDataTypeUMask = 1 << 24
+};
+
+enum NeonListType {
+ nlt_1 = 0x7,
+ nlt_2 = 0xA,
+ nlt_3 = 0x6,
+ nlt_4 = 0x2
+};
+
+enum NeonSize {
+ Neon8 = 0x0,
+ Neon16 = 0x1,
+ Neon32 = 0x2,
+ Neon64 = 0x3
+};
+
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@@ -396,6 +364,7 @@
const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
const uint32_t kVFPInexactExceptionBit = 1 << 4;
const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25;
const uint32_t kVFPNConditionFlagBit = 1 << 31;
const uint32_t kVFPZConditionFlagBit = 1 << 30;
@@ -437,61 +406,6 @@
// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-arm.cc, as they use named registers
-// and other constants.
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-extern const Instr kPopInstruction;
-
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-extern const Instr kPushRegPattern;
-
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-extern const Instr kPopRegPattern;
-
-// mov lr, pc
-extern const Instr kMovLrPc;
-// ldr rd, [pc, #offset]
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-// blxcc rm
-extern const Instr kBlxRegMask;
-
-extern const Instr kBlxRegPattern;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-extern const Instr kLdrRegFpOffsetPattern;
-
-extern const Instr kStrRegFpOffsetPattern;
-
-extern const Instr kLdrRegFpNegOffsetPattern;
-
-extern const Instr kStrRegFpNegOffsetPattern;
-
-extern const Instr kLdrStrInstrTypeMask;
-extern const Instr kLdrStrInstrArgumentMask;
-extern const Instr kLdrStrOffsetMask;
-
-
-// -----------------------------------------------------------------------------
// Instruction abstraction.
// The class Instruction enables access to individual fields defined in the ARM
@@ -593,6 +507,7 @@
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int TypeValue() const { return Bits(27, 25); }
+ inline int SpecialValue() const { return Bits(27, 23); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);
@@ -649,10 +564,13 @@
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
inline int RotateValue() const { return Bits(11, 8); }
+ DECLARE_STATIC_ACCESSOR(RotateValue);
inline int Immed8Value() const { return Bits(7, 0); }
+ DECLARE_STATIC_ACCESSOR(Immed8Value);
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
+ DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
// Fields used in Load/Store instructions
inline int PUValue() const { return Bits(24, 23); }
@@ -690,6 +608,9 @@
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
+ // Test for a nop instruction, which falls under type 1.
+ inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+
// Test for a stop instruction.
inline bool IsStop() const {
return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 7b08ed8..9c7104e 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -1,117 +1,64 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU specific code for arm independent of OS goes here.
#ifdef __arm__
+#ifdef __QNXNTO__
+#include <sys/mman.h> // for cache flushing.
+#undef MAP_TYPE
+#else
#include <sys/syscall.h> // for cache flushing.
#endif
+#endif
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "cpu.h"
-#include "macro-assembler.h"
-#include "simulator.h" // for cache flushing.
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+#include "src/simulator.h" // for cache flushing.
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
+void CpuFeatures::FlushICache(void* start, size_t size) {
+ if (size == 0) return;
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(VFP3);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // Nothing to do flushing no instructions.
- if (size == 0) {
- return;
- }
-
-#if defined (USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Not generating ARM instructions for C code. This means that we are
// building an ARM emulator-based target. We should notify the simulator
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+
+#elif V8_OS_QNX
+ msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
+
#else
- // Ideally, we would call
- // syscall(__ARM_NR_cacheflush, start,
- // reinterpret_cast<intptr_t>(start) + size, 0);
- // however, syscall(int, ...) is not supported on all platforms, especially
- // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
+ register uint32_t beg asm("r0") = reinterpret_cast<uint32_t>(start);
+ register uint32_t end asm("r1") = beg + size;
+ register uint32_t flg asm("r2") = 0;
- register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
- register uint32_t end asm("a2") =
- reinterpret_cast<uint32_t>(start) + size;
- register uint32_t flg asm("a3") = 0;
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
-#endif
-}
+ asm volatile(
+ // This assembly works for both ARM and Thumb targets.
+ // Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
+ // Thumb targets.
+ " push {r7}\n"
+ // r0 = beg
+ // r1 = end
+ // r2 = flags (0)
+ " ldr r7, =%c[scno]\n" // r7 = syscall number
+ " svc 0\n"
-void CPU::DebugBreak() {
-#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
+ " pop {r7}\n"
+ :
+ : "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
+ : "memory");
#endif
}
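+
+// [Editor's note, a usage sketch rather than part of this patch] Callers are
+// expected to flush the icache over any range of freshly written or patched
+// code before executing it, e.g.:
+//
+//   CpuFeatures::FlushICache(code->instruction_start(),
+//                            code->instruction_size());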
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 96139a2..6d7d6b8 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -1,41 +1,17 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "codegen.h"
-#include "debug.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -48,24 +24,15 @@
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
- // #if USE_BLX
// ldr ip, [pc, #0]
// blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
// <debug break return code entry point address>
- // bktp 0
+ // bkpt 0
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
-#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry());
patcher.masm()->bkpt(0);
}
@@ -80,47 +47,38 @@
// A debug break in the frame exit code is identified by the JS frame exit code
// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
+ DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
+ DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
// mov r2, r2
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
- // #if USE_BLX
// ldr ip, [pc, #0]
// blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
// <debug break slot code entry point address>
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
-#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry());
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
+ DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
@@ -133,14 +91,22 @@
RegList object_regs,
RegList non_object_regs) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(ip);
+ }
+ __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+ __ push(ip);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non-object values
// are stored as smis so that the GC leaves them untouched.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
+ DCHECK((object_regs & ~kJSCallerSaved) == 0);
+ DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+ DCHECK((object_regs & non_object_regs) == 0);
if ((object_regs | non_object_regs) != 0) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
@@ -148,9 +114,9 @@
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
+ __ Assert(eq, kUnableToEncodeValueAsSmi);
}
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+ __ SmiTag(reg);
}
}
__ stm(db_w, sp, object_regs | non_object_regs);
@@ -159,10 +125,10 @@
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r0, Operand::Zero()); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -172,7 +138,7 @@
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ __ SmiUntag(reg);
}
if (FLAG_debug_code &&
(((object_regs | non_object_regs) & (1 << r)) == 0)) {
@@ -181,6 +147,9 @@
}
}
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
// Leave the internal frame.
}
@@ -188,70 +157,67 @@
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
+ ExternalReference::debug_after_break_target_address(masm->isolate());
__ mov(ip, Operand(after_break_target));
__ ldr(ip, MemOperand(ip));
__ Jump(ip);
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-arm.cc).
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
// ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- [sp] : receiver
+ // -- r1 : function
+ // -- r3 : slot in feedback array (smi)
// -----------------------------------
- // Registers r0 and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0);
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-arm.cc).
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Generate_DebugBreakCallHelper(
+ masm, receiver.bit() | name.bit() | value.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for keyed IC load (from ic-arm.cc).
+ GenerateLoadICDebugBreak(masm);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC keyed store call (from ic-arm.cc).
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Generate_DebugBreakCallHelper(
+ masm, receiver.bit() | name.bit() | value.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
// -----------------------------------
- // Registers r0, r1, and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-arm.cc)
- // ----------- S t a t e -------------
- // -- r2 : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites, r0 is expected to be TOS, which is an
// object. This is not generally the case, so this should be used with care.
@@ -259,7 +225,7 @@
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
@@ -268,17 +234,7 @@
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- r2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
@@ -288,18 +244,20 @@
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
Assembler::BlockConstPoolScope block_const_pool(masm);
@@ -309,35 +267,58 @@
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(MacroAssembler::DEBUG_BREAK_NOP);
}
- ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
masm->InstructionsGeneratedSince(&check_codesize));
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(ip, Operand(restarter_frame_function_slot));
+ __ mov(r1, Operand::Zero());
+ __ str(r1, MemOperand(ip, 0));
+
+ // Load the function pointer off of our current stack frame.
+ __ ldr(r1, MemOperand(fp,
+ StandardFrameConstants::kConstantPoolOffset - kPointerSize));
+
+ // Pop return address, frame and constant pool pointer (if
+ // FLAG_enable_ool_constant_pool).
+ __ LeaveFrame(StackFrame::INTERNAL);
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ // Load context from the function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, r1 is function, cp is context.
+ __ Jump(ip);
+ }
}
-const bool Debug::kFrameDropperSupported = false;
+
+const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 699e6aa..0455a3b 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 16;
+const int Deoptimizer::table_entry_size_ = 8;
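+// [Editor's note, an assumption] 8 bytes is two ARM instructions per entry in
+// the deoptimization jump table (e.g. mov ip, #entry_id; b done), down from
+// the four instructions the old eager/lazy entries required.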
int Deoptimizer::patch_size() {
@@ -44,705 +21,61 @@
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
- AssertNoAllocation no_allocation;
-
- if (!function->IsOptimized()) return;
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
// Invalidate the relocation information, as the code patching below will
// make it invalid, and it is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
- int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ // We need calls to have a predictable size in the unoptimized code, but
+ // this is optimized code, so we don't need a predictable size here.
+ int call_size_in_bytes =
+ MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
+ deopt_entry,
+ RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
- ASSERT(call_size_in_bytes <= patch_size());
+ DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
+ DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
- ASSERT(prev_call_address == NULL ||
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+ DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
+ DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
-
- Isolate* isolate = code->GetIsolate();
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const int32_t kBranchBeforeStackCheck = 0x2a000001;
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // The call of the stack guard check has the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // 2a 00 00 01 bcs ok
- // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
- // e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
-
- // We patch the code to the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
- // and overwrite the constant containing the
- // address of the stack check stub.
-
- // Replace conditional jump with NOP.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
-
- // Replace NOP with conditional jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- patcher.masm()->b(+4, cs);
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, ast_id);
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32 code, but relies on register names (fp, sp)
-// and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
- if (is_topmost) {
- output_frame->SetRegister(pc.code(), pc_value);
- }
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
}
@@ -756,7 +89,7 @@
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -767,6 +100,31 @@
}
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler());
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(r0.code(), params);
+ output_frame->SetRegister(r1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on ARM in the input frame.
+ return false;
+}
+
+
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -774,9 +132,6 @@
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- Isolate* isolate = masm()->isolate();
-
- CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -784,23 +139,20 @@
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+ kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
- // Save all VFP registers before messing with them.
- DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
- DwVfpRegister last =
- DwVfpRegister::FromAllocationIndex(
- DwVfpRegister::kNumAllocatableRegisters - 1);
- ASSERT(last.code() > first.code());
- ASSERT((last.code() - first.code()) ==
- (DwVfpRegister::kNumAllocatableRegisters - 1));
-#ifdef DEBUG
- for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
- ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
- (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
- }
-#endif
- __ vstm(db_w, sp, first, last);
+ // Save all allocatable VFP registers before messing with them.
+ DCHECK(kDoubleRegZero.code() == 14);
+ DCHECK(kScratchDoubleReg.code() == 15);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ // Push registers d0-d13, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ __ vstm(db_w, sp, d16, d31, ne);
+ __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ __ vstm(db_w, sp, d0, d13);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -813,22 +165,12 @@
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (r3) (return
+ // Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
- if (type() == EAGER) {
- __ mov(r3, Operand(0));
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(r3, lr);
+ // Correct one word for bailout id.
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@@ -839,12 +181,12 @@
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address()));
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register r0 and get the input
@@ -852,7 +194,7 @@
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
@@ -860,22 +202,17 @@
}
// Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@@ -887,10 +224,13 @@
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
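The new pop_loop_header label converts the copy from a do/while into a plain while: control branches straight to the bound header, so a zero-size frame copies nothing. The same pattern is applied to the outer and inner push loops below. Equivalent C++ shape, as a sketch rather than V8 code:

    // Shape of the pop loop above, with the same branch-to-header structure.
    void CopyToFrame(unsigned* dst, unsigned*& sp, unsigned* limit) {
      goto header;                   // __ b(&pop_loop_header)
    body:
      *dst++ = *sp++;                // __ pop(r4); __ str(r4, ...); add r3, #4
    header:
      if (sp != limit) goto body;    // __ cmp(r2, sp); __ b(ne, &pop_loop)
    }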
@@ -902,38 +242,54 @@
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: r0 = current "FrameDescription** output_",
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
- __ add(r1, r0, Operand(r1, LSL, 2));
+ __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
+ __ add(r1, r4, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r0, 0)); // output_[ix]
+ __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
- __ cmp(r3, Operand(0));
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
+ __ bind(&inner_loop_header);
+ __ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
- __ add(r0, r0, Operand(kPointerSize));
- __ cmp(r0, r1);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r4, r1);
__ b(lt, &outer_push_loop);
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+ int src_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ if (i == kDoubleRegZero.code()) continue;
+ if (i == kScratchDoubleReg.code()) continue;
+
+ const DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
+ src_offset += kDoubleSize;
}
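The restore loop's predicate mirrors the save path: d0-d13 load unconditionally (al) while d16-d31 are predicated on the Z flag left by CheckFor32DRegs (ne). A standalone sketch, not V8 code, of which slots are touched:

    #include <cstdio>

    int main() {
      const bool has_32_dregs = false;  // assumption: a 16-register VFP core
      int src_offset = 0;               // into FrameDescription::double_registers_
      for (int i = 0; i < 32; ++i) {
        if (i == 14 || i == 15) continue;  // kDoubleRegZero / kScratchDoubleReg
        bool restored = (i < 16) || has_32_dregs;  // al vs. ne condition
        std::printf("d%-2d src_offset %3d %s\n", i, src_offset,
                    restored ? "restored" : "skipped");
        src_offset += 8;  // kDoubleSize advances even when predicated off
      }
      return 0;
    }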
+ // Push state, pc, and continuation from the last output frame.
+ __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
+ __ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
@@ -954,34 +310,45 @@
__ InitializeRootRegister();
__ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
+ __ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
- __ Jump(r7);
+ __ Jump(ip);
__ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- if (type() == EAGER) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
__ mov(ip, Operand(i));
- __ push(ip);
__ b(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
+ __ push(ip);
}
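Every table entry is now the same two-instruction pair (mov ip, #id; b done) with one shared push at the join point, so an entry's address is a linear function of its bailout id. A sketch assuming table_entry_size_ is two instructions; not V8 code:

    constexpr int kInstrSize = 4;
    constexpr int kTableEntrySize = 2 * kInstrSize;  // mov ip, #i ; b done

    inline const unsigned char* EntryAddress(const unsigned char* table_base,
                                             int bailout_id) {
      return table_base + bailout_id * kTableEntrySize;
    }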
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 96a7d3c..85977b1 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
@@ -47,21 +24,18 @@
#include <assert.h>
-#include <stdio.h>
#include <stdarg.h>
+#include <stdio.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "constants-arm.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
+#include "src/arm/constants-arm.h"
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
namespace v8 {
@@ -113,6 +87,8 @@
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
+ void FormatNeonList(int Vd, int type);
+ void FormatNeonMemory(int Rn, int align, int Rm);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@@ -133,6 +109,8 @@
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
+ void DecodeSpecialCondition(Instruction* instr);
+
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
@@ -187,12 +165,14 @@
Print(converter_.NameOfCPURegister(reg));
}
+
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(VFPRegisters::Name(reg, false));
}
-// Print the VFP D register name according to the active name converter.
+
+// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(VFPRegisters::Name(reg, true));
}
@@ -227,15 +207,15 @@
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift_index],
- shift_amount);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift_index],
+ shift_amount);
} else {
// by register
int rs = instr->RsValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift_index]);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s ", shift_names[shift_index]);
PrintRegister(rs);
}
}
@@ -247,8 +227,7 @@
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
}
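The decode above is the standard ARM rotated-immediate scheme: an 8-bit value rotated right by twice the 4-bit rotate field. A self-contained sketch, not V8 code, that also guards the rotate-by-zero case:

    #include <cassert>
    #include <cstdint>

    uint32_t DecodeArmImmediate(uint32_t rotate_field, uint32_t immed8) {
      uint32_t rotate = rotate_field * 2;
      if (rotate == 0) return immed8;  // a 32-bit shift would be undefined
      return (immed8 >> rotate) | (immed8 << (32 - rotate));
    }

    int main() {
      assert(DecodeArmImmediate(0, 0xFF) == 0xFFu);
      assert(DecodeArmImmediate(15, 0x3F) == 0xFCu);  // rotate right by 30
      return 0;
    }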
@@ -256,10 +235,10 @@
void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[instr->Bit(6) * 2],
+ instr->Bits(11, 7));
}
}
@@ -303,14 +282,14 @@
return;
default:
if (svc >= kStopCode) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d - 0x%x",
+ svc & kStopCodeMask,
+ svc & kStopCodeMask);
} else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ svc);
}
return;
}
@@ -320,7 +299,7 @@
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'r');
+ DCHECK(format[0] == 'r');
if (format[1] == 'n') { // 'rn: Rn register
int reg = instr->RnValue();
PrintRegister(reg);
@@ -343,7 +322,7 @@
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
- ASSERT(STRING_STARTS_WITH(format, "rlist"));
+ DCHECK(STRING_STARTS_WITH(format, "rlist"));
int rlist = instr->RlistValue();
int reg = 0;
Print("{");
@@ -369,7 +348,7 @@
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
- ASSERT((format[0] == 'S') || (format[0] == 'D'));
+ DCHECK((format[0] == 'S') || (format[0] == 'D'));
VFPRegPrecision precision =
format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
@@ -381,7 +360,16 @@
} else if (format[1] == 'm') {
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- reg = instr->VFPDRegValue(precision);
+ if ((instr->TypeValue() == 7) &&
+ (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x1)) {
+ // vmov.32 has Vd in a different place.
+ reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ } else {
+ reg = instr->VFPDRegValue(precision);
+ }
+
if (format[2] == '+') {
int immed8 = instr->Immed8Value();
if (format[0] == 'S') reg += immed8 - 1;
@@ -408,13 +396,47 @@
}
+void Decoder::FormatNeonList(int Vd, int type) {
+ if (type == nlt_1) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d}", Vd);
+ } else if (type == nlt_2) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d}", Vd, Vd + 1);
+ } else if (type == nlt_3) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+ } else if (type == nlt_4) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+ }
+}
+
+
+void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "[r%d", Rn);
+ if (align != 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ":%d", (1 << align) << 6);
+ }
+ if (Rm == 15) {
+ Print("]");
+ } else if (Rm == 13) {
+ Print("]!");
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "], r%d", Rm);
+ }
+}
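To make the new NEON address formatting concrete, here is a standalone sketch (not V8 code) of the strings FormatNeonMemory produces; Rm == 15 means no writeback and Rm == 13 means post-increment writeback:

    #include <cstdio>

    void FormatNeonMemorySketch(int Rn, int align, int Rm) {
      std::printf("[r%d", Rn);
      if (align != 0) {
        std::printf(":%d", (1 << align) << 6);  // alignment: 128/256/512 bits
      }
      if (Rm == 15)      std::printf("]\n");
      else if (Rm == 13) std::printf("]!\n");
      else               std::printf("], r%d\n", Rm);
    }

    int main() {
      FormatNeonMemorySketch(0, 0, 15);  // [r0]
      FormatNeonMemorySketch(1, 1, 13);  // [r1:128]!
      FormatNeonMemorySketch(2, 2, 3);   // [r2:256], r3
      return 0;
    }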
+
+
// Print the movw or movt instruction.
void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
int rd = instr->RdValue();
PrintRegister(rd);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm);
}
@@ -440,14 +462,13 @@
return 1;
}
case 'c': { // 'cond: conditional execution
- ASSERT(STRING_STARTS_WITH(format, "cond"));
+ DCHECK(STRING_STARTS_WITH(format, "cond"));
PrintCondition(instr);
return 4;
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
return 1;
}
case 'f': { // 'f: bitfield instructions - v7 and above.
@@ -457,11 +478,11 @@
// BFC/BFI:
// Bits 20-16 represent most-significant bit. Convert to width.
width -= lsbit;
- ASSERT(width > 0);
+ DCHECK(width > 0);
}
- ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
+ DCHECK((width + lsbit) <= 32);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
return 1;
}
case 'h': { // 'h: halfword operation for extra loads and stores
@@ -477,13 +498,13 @@
int width = (format[3] - '0') * 10 + (format[4] - '0');
int lsb = (format[6] - '0') * 10 + (format[7] - '0');
- ASSERT((width >= 1) && (width <= 32));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width + lsb) <= 32);
+ DCHECK((width >= 1) && (width <= 32));
+ DCHECK((lsb >= 0) && (lsb <= 31));
+ DCHECK((width + lsb) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ instr->Bits(width + lsb - 1, lsb));
return 8;
}
case 'l': { // 'l: branch and link
@@ -499,7 +520,7 @@
return 2;
}
if (format[1] == 'e') { // 'memop: load/store instructions.
- ASSERT(STRING_STARTS_WITH(format, "memop"));
+ DCHECK(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
} else {
@@ -517,38 +538,37 @@
return 5;
}
// 'msg: for simulator break instructions
- ASSERT(STRING_STARTS_WITH(format, "msg"));
+ DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s", converter_.NameInCode(str));
return 3;
}
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Value());
+ DCHECK(STRING_STARTS_WITH(format, "off12"));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
- ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
+ DCHECK(STRING_STARTS_WITH(format, "off0to3and8to19"));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ (instr->Bits(19, 8) << 4) +
+ instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off8"));
+ DCHECK(STRING_STARTS_WITH(format, "off8"));
int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
return 4;
}
case 'p': { // 'pu: P and U bits for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "pu"));
+ DCHECK(STRING_STARTS_WITH(format, "pu"));
PrintPU(instr);
return 2;
}
@@ -558,29 +578,29 @@
case 's': {
if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
- ASSERT(STRING_STARTS_WITH(format, "shift_op"));
+ DCHECK(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
- ASSERT(instr->TypeValue() == 1);
+ DCHECK(instr->TypeValue() == 1);
PrintShiftImm(instr);
}
return 8;
} else if (format[6] == 's') { // 'shift_sat.
- ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
+ DCHECK(STRING_STARTS_WITH(format, "shift_sat"));
PrintShiftSat(instr);
return 9;
} else { // 'shift_rm
- ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
+ DCHECK(STRING_STARTS_WITH(format, "shift_rm"));
PrintShiftRm(instr);
return 8;
}
} else if (format[1] == 'v') { // 'svc
- ASSERT(STRING_STARTS_WITH(format, "svc"));
+ DCHECK(STRING_STARTS_WITH(format, "svc"));
PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
- ASSERT(STRING_STARTS_WITH(format, "sign"));
+ DCHECK(STRING_STARTS_WITH(format, "sign"));
if (instr->HasSign()) {
Print("s");
}
@@ -593,13 +613,13 @@
return 1;
}
case 't': { // 'target: target of branch instructions
- ASSERT(STRING_STARTS_WITH(format, "target"));
+ DCHECK(STRING_STARTS_WITH(format, "target"));
int off = (instr->SImmed24Value() << 2) + 8;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(
- reinterpret_cast<byte*>(instr) + off));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%+d -> %s",
+ off,
+ converter_.NameOfAddress(
+ reinterpret_cast<byte*>(instr) + off));
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
@@ -692,11 +712,19 @@
// Rn field to encode it.
Format(instr, "mul'cond's 'rn, 'rm, 'rs");
} else {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ if (instr->Bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ } else {
+ // The MLS instruction description (A 4.1.29) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+ }
}
} else {
// The signed/long multiply instructions use the terms RdHi and RdLo
@@ -822,6 +850,8 @@
} else {
Unknown(instr); // not used by V8
}
+ } else if ((type == 1) && instr->IsNopType1()) {
+ Format(instr, "nop'cond");
} else {
switch (instr->OpcodeField()) {
case AND: {
@@ -961,19 +991,125 @@
break;
}
case ia_x: {
- if (instr->HasW()) {
- VERIFY(instr->Bits(5, 4) == 0x1);
- if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
- } else {
- UNREACHABLE(); // SSAT.
- }
- } else {
+ if (instr->Bit(4) == 0) {
Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
+ } else {
+ if (instr->Bits(11, 7) == 0) {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
+ } else {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case 1:
+ UNREACHABLE();
+ break;
+ case 2:
+ UNREACHABLE();
+ break;
+ case 3:
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ break;
+ }
+ } else {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ UNREACHABLE();
+ break;
+ case 1:
+ UNREACHABLE();
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb16'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case 3:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ }
}
break;
}
case db_x: {
+ if (FLAG_enable_sudiv) {
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ if (instr->Bit(21) == 0x1) {
+ // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+ } else {
+ // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
+ }
+ break;
+ }
+ }
+ }
Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
break;
}
@@ -1047,14 +1183,14 @@
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "\n %p %08x stop message: %s",
+ reinterpret_cast<void*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<uint32_t*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize));
// We have decoded 2 * Instruction::kInstrSize bytes.
return 2 * Instruction::kInstrSize;
} else {
@@ -1072,11 +1208,14 @@
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
+// Dd = vmla(Dn, Dm)
+// Dd = vmls(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@@ -1092,20 +1231,27 @@
if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'Dm");
+ Format(instr, "vmov'cond.f64 'Dd, 'Dm");
} else {
- Format(instr, "vmov.f32'cond 'Sd, 'Sm");
+ Format(instr, "vmov'cond.f32 'Sd, 'Sm");
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
- Format(instr, "vabs.f64'cond 'Dd, 'Dm");
+ Format(instr, "vabs'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
- Format(instr, "vneg.f64'cond 'Dd, 'Dm");
+ Format(instr, "vneg'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", fraction_bits);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
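The fraction-bit count for the fixed-point vcvt above is split across bits 3:0 and bit 5 and encodes 32 - fbits. A worked sketch, not V8 code:

    #include <cassert>

    int FractionBits(int bits_3_0, int bit_5) {
      return 32 - ((bits_3_0 << 1) | bit_5);
    }

    int main() {
      assert(FractionBits(0xF, 1) == 1);   // maximum field value -> 1 bit
      assert(FractionBits(0x0, 0) == 32);  // zero field -> 32 fraction bits
      return 0;
    }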
@@ -1113,10 +1259,10 @@
(instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
+ Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
} else if (instr->Opc3Value() == 0x0) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'd");
+ Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
}
@@ -1126,22 +1272,34 @@
} else if (instr->Opc1Value() == 0x3) {
if (instr->SzValue() == 0x1) {
if (instr->Opc3Value() & 0x1) {
- Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
} else {
- Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
@@ -1152,6 +1310,22 @@
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+ }
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
@@ -1199,9 +1373,9 @@
if (dp_operation && !raise_exception_for_qnan) {
if (instr->Opc2Value() == 0x4) {
- Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
} else if (instr->Opc2Value() == 0x5) {
- Format(instr, "vcmp.f64'cond 'Dd, #0.0");
+ Format(instr, "vcmp'cond.f64 'Dd, #0.0");
} else {
Unknown(instr); // invalid
}
@@ -1218,9 +1392,9 @@
bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
- Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
} else {
- Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
}
}
@@ -1237,15 +1411,15 @@
if (dp_operation) {
if (unsigned_integer) {
- Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
} else {
- Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
}
} else {
if (unsigned_integer) {
- Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
} else {
- Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
}
}
} else {
@@ -1253,15 +1427,15 @@
if (dp_operation) {
if (unsigned_integer) {
- Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
} else {
- Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
}
} else {
if (unsigned_integer) {
- Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
} else {
- Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
}
}
}
@@ -1315,7 +1489,7 @@
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
Unknown(instr); // Not used by V8.
} else if (instr->HasL()) {
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
@@ -1324,6 +1498,7 @@
}
break;
case 0x8:
+ case 0xA:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
@@ -1331,6 +1506,7 @@
}
break;
case 0xC:
+ case 0xE:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
@@ -1339,7 +1515,10 @@
break;
case 0x4:
case 0x5:
- case 0x9: {
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
@@ -1356,6 +1535,93 @@
}
}
+
+void Decoder::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vst1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vld1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ int Rn = instr->Bits(19, 16);
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d]", Rn);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #-%d]", Rn, offset);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #+%d]", Rn, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ Unknown(instr);
+ break;
+ }
+}
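For the vmovl cases above, the register numbers and lane width are reassembled from split fields: imm3 encodes the element width in eighths of a byte times 8, and Vd must be even because the destination is a Q-register alias. A sketch, not V8 code:

    #include <cassert>

    struct Vmovl { int width, q, d; };

    Vmovl DecodeVmovl(int bit22, int vd_field, int bit5, int vm_field,
                      int imm3) {
      assert((vd_field & 1) == 0);              // odd Vd is rejected above
      return { imm3 * 8,                        // element width: 8, 16 or 32
               (bit22 << 3) | (vd_field >> 1),  // q<n> destination
               (bit5 << 4) | vm_field };        // d<n> source
    }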
+
#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
@@ -1367,7 +1633,7 @@
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return instruction_bits & kConstantPoolLengthMask;
+ return DecodeConstantPoolLength(instruction_bits);
} else {
return -1;
}
@@ -1378,20 +1644,27 @@
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
- Unknown(instr);
+ DecodeSpecialCondition(instr);
return Instruction::kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "constant pool begin (length %d)",
- instruction_bits &
- kConstantPoolLengthMask);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "constant pool begin (length %d)",
+ DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
+ } else if (instruction_bits == kCodeAgeJumpInstruction) {
+ // The code age prologue has a constant immediately following the jump
+ // instruction.
+ Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
+ DecodeType2(instr);
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ " (0x%08x)", target->InstructionBits());
+ return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
@@ -1442,7 +1715,7 @@
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
@@ -1505,8 +1778,9 @@
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(
+ f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index a805d28..fde4a17 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -1,42 +1,43 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "frames-inl.h"
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/macro-assembler.h"
+
+#include "src/arm/assembler-arm-inl.h"
+#include "src/arm/assembler-arm.h"
+#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ return pp;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ return pp;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ const int offset = ExitFrameConstants::kConstantPoolOffset;
+ return Memory::Object_at(fp() + offset);
}
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index a10acd0..ce65e88 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
@@ -52,8 +29,6 @@
const int kNumJSCallerSaved = 4;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedReg(0) returns r0.code() == 0
int JSCallerSavedCode(int n);
@@ -64,8 +39,8 @@
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
- 1 << 8 | // r8 v5 (cp in JavaScript code)
+ 1 << 7 | // r7 v4 (cp in JavaScript code)
+ 1 << 8 | // r8 v5 (pp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
@@ -100,28 +75,22 @@
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kFrameSize = FLAG_enable_ool_constant_pool ?
+ 3 * kPointerSize : 2 * kPointerSize;
+
+ static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
+ -3 * kPointerSize : 0;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
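The constants above describe two exit-frame shapes: with the out-of-line constant pool enabled, the frame grows by one slot below the code object. A sketch assuming 32-bit pointers; not V8 code:

    constexpr int kPointerSize = 4;

    constexpr int ExitFrameSize(bool ool_constant_pool) {
      // SP and code slots always; one extra slot for the constant pool.
      return (ool_constant_pool ? 3 : 2) * kPointerSize;
    }

    static_assert(ExitFrameSize(true) == 12 && ExitFrameSize(false) == 8,
                  "matches kFrameSize above on 32-bit ARM");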
@@ -134,20 +103,6 @@
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerSPOffset = 2 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@@ -163,14 +118,30 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
class InternalFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
@@ -181,6 +152,11 @@
}
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 69b12ce..eb60c3f 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1,46 +1,24 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
-#include "arm/code-stubs-arm.h"
-#include "arm/macro-assembler-arm.h"
+#include "src/arm/code-stubs-arm.h"
+#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -63,26 +41,23 @@
}
~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
+ DCHECK(patch_site_.is_bound() == info_emitted_);
}
// When initially emitting this ensure that a jump is always generated to skip
// the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
- // Don't use b(al, ...) as that might emit the constant pool right after the
- // branch. After patching when the branch is no longer unconditional
- // execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
// When initially emitting this ensure that a jump is never generated to skip
// the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
@@ -90,6 +65,8 @@
}
void EmitPatchInfo() {
+ // Block literal pool emission whilst recording patch site information.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
@@ -112,13 +89,6 @@
};
-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
- UNREACHABLE();
- return 24;
-}
-
-
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -127,6 +97,7 @@
// The live registers are:
// o r1: the JS function object being called (i.e., ourselves)
// o cp: our context
+// o pp: our caller's constant pool pointer (if FLAG_enable_ool_constant_pool)
// o fp: our caller's frame pointer
// o sp: stack pointer
// o lr: return address
@@ -137,29 +108,36 @@
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ ldr(r2, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+
__ str(r2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
@@ -168,20 +146,44 @@
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- int locals_count = info->scope()->num_stack_slots();
-
- __ Push(lr, fp, cp, r1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(info->IsCodePreAgingActive());
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ DCHECK(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ sub(r9, sp, Operand(locals_count * kPointerSize));
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ __ cmp(r9, Operand(r2));
+ __ b(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(r9);
+ }
+ // Continue loop if not done.
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(&loop_header, ne);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(r9);
+ }
}
}
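The batching above emits locals_count pushes as whole batches of kMaxPushes inside a counted loop, plus a straight-line remainder. A sketch of the bookkeeping, not V8 code:

    #include <cassert>

    struct PushPlan { int loop_iterations, remainder; };

    PushPlan PlanPushes(int locals_count, bool optimize_for_size) {
      int kMaxPushes = optimize_for_size ? 4 : 32;
      return { locals_count / kMaxPushes, locals_count % kMaxPushes };
    }

    int main() {
      PushPlan p = PlanPushes(70, false);
      assert(p.loop_iterations == 2 && p.remainder == 6);  // 2*32 + 6 == 70
      return 0;
    }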
@@ -190,19 +192,27 @@
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ // Argument to NewContext is the function, which is still in r1.
+ Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(r1);
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
+ __ push(r1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in r0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, r0);
+ __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -217,8 +227,15 @@
__ str(r0, target);
// Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
@@ -246,14 +263,14 @@
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, r0, r1, r2);
@@ -270,35 +287,37 @@
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableProxy* proxy = scope()->function();
- ASSERT(proxy->var()->mode() == CONST ||
- proxy->var()->mode() == CONST_HARMONY);
- ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
- EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+ VariableDeclaration* function = scope()->function();
+ DCHECK(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm_,
+ masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
__ bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
+ DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ DCHECK(loop_depth() == 0);
}
}
@@ -310,7 +329,7 @@
EmitReturnSequence();
// Force emit the constant pool, so it doesn't get emitted in the middle
- // of the stack check table.
+ // of the back edge table.
masm()->CheckConstPool(true, false);
}
@@ -322,65 +341,66 @@
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
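
The ldr/sub/str sequence above decrements a tagged smi in place. A minimal sketch of why that works, assuming the 32-bit ARM smi encoding (31-bit value, zero tag bit); the tags cancel, so no untag/retag is needed:

#include <cstdint>

constexpr int32_t SmiFromInt(int32_t value) { return value << 1; }  // tag 0

int32_t DecrementCounter(int32_t tagged_counter, int delta) {
  return tagged_counter - SmiFromInt(delta);  // result is still a valid smi
}
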
+#ifdef CAN_USE_ARMV7_INSTRUCTIONS
+static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
+#else
+static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
+#endif
+
+
void FullCodeGenerator::EmitProfilingCounterReset() {
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
+ PredictableCodeSizeScope predictable_code_size_scope(
+ masm_, kProfileCounterResetSequenceLength);
+ Label start;
+ __ bind(&start);
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
+ if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
- reset_value = 10;
+ reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
+ // The mov instruction above can take from 1 to 3 instructions (on ARMv7) or
+ // 1 to 5 instructions (on ARMv6), depending on whether the constant comes
+ // from an extended constant pool - insert nops to compensate.
+ int expected_instr_count =
+ (kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
+ DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
+ while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
+ __ nop();
+ }
__ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
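
The nop loop above pads a variable-length mov to a fixed sequence length so the reset sequence always has the same size and can be located and patched by offset. A self-contained sketch of the idea (Counter is a hypothetical stand-in for the Assembler, not V8 API):

#include <cassert>

struct Counter {
  int emitted = 0;
  void nop() { ++emitted; }  // each nop is one instruction
};

void PadTo(Counter* masm, int expected_instr_count) {
  assert(masm->emitted <= expected_instr_count);
  while (masm->emitted != expected_instr_count) masm->nop();
}
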
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ // Block literal pools whilst emitting back edge code.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
- }
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- }
+ DCHECK(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
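
A small, self-contained sketch of the weight computation above. kMaxBackEdgeWeight is 127 (the constant removed from this file and now defined elsewhere); the divisor value below is illustrative, since kCodeSizeMultiplier comes from the platform headers:

#include <algorithm>

static const int kMaxBackEdgeWeight = 127;
static const int kCodeSizeMultiplier = 149;  // illustrative ARM-ish value

int BackEdgeWeight(int distance_in_bytes) {
  // Bigger loop bodies decrement the interrupt budget faster, so the
  // interrupt check fires after roughly the same amount of executed code.
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance_in_bytes / kCodeSizeMultiplier));
}
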
@@ -403,56 +423,50 @@
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(r2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ __ push(r0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(r0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
+ __ bind(&check_exit_codesize);
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
+ PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
- masm_->mov(sp, fp);
- masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
+ int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
#ifdef DEBUG
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
+ DCHECK(Assembler::kJSReturnSequenceInstructions <=
masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
@@ -460,25 +474,25 @@
void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
}
void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
__ push(result_register());
}
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
@@ -543,7 +557,7 @@
true,
true_label_,
false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -570,7 +584,7 @@
void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
__ Drop(count);
}
@@ -578,7 +592,7 @@
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
}
@@ -586,7 +600,7 @@
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
if (count > 1) __ Drop(count - 1);
__ str(reg, MemOperand(sp, 0));
}
@@ -594,7 +608,7 @@
void FullCodeGenerator::TestContext::DropAndPlug(int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
@@ -605,7 +619,7 @@
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
+ DCHECK(materialize_true == materialize_false);
__ bind(materialize_true);
}
@@ -629,19 +643,18 @@
Label done;
__ bind(materialize_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
__ jmp(&done);
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
__ bind(&done);
+ __ push(ip);
}
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
+ DCHECK(materialize_true == true_label_);
+ DCHECK(materialize_false == false_label_);
}
@@ -681,18 +694,9 @@
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(VFP3)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
- __ tst(result_register(), result_register());
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -713,7 +717,7 @@
MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
+ DCHECK(var->IsStackAllocated());
// Offset is negative because higher indexes are at lower addresses.
int offset = -var->index() * kPointerSize;
// Adjust by a (parameter or local) base offset.
@@ -727,7 +731,7 @@
MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
@@ -749,10 +753,10 @@
Register src,
Register scratch0,
Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(!scratch0.is(src));
+ DCHECK(!scratch0.is(scratch1));
+ DCHECK(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
@@ -789,62 +793,52 @@
}
-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- VariableMode mode,
- FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool binding_needs_init = (function == NULL) &&
- (mode == CONST || mode == CONST_HARMONY || mode == LET);
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
- ++global_count_;
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
- if (function != NULL) {
- Comment cmnt(masm_, "[ Declaration");
- VisitForAccumulatorValue(function);
- __ str(result_register(), StackOperand(variable));
- } else if (binding_needs_init) {
- Comment cmnt(masm_, "[ Declaration");
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
}
break;
case Variable::CONTEXT:
- // The variable in the decl always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
- // Check that we're not inside a with or catch context.
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.");
- __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.");
- }
- if (function != NULL) {
- Comment cmnt(masm_, "[ Declaration");
- VisitForAccumulatorValue(function);
- __ str(result_register(), ContextOperand(cp, variable->index()));
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (binding_needs_init) {
- Comment cmnt(masm_, "[ Declaration");
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
@@ -853,38 +847,146 @@
break;
case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ Declaration");
+ Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR ||
- mode == CONST ||
- mode == CONST_HARMONY ||
- mode == LET);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ DCHECK(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
- if (function != NULL) {
- __ Push(cp, r2, r1);
- // Push initial value for function declaration.
- VisitForStackValue(function);
- } else if (binding_needs_init) {
+ if (hole_init) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
__ Push(cp, r2, r1, r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
break;
}
}
}
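
A minimal sketch of the hole-initialization rule applied above: let and const bindings (including legacy const) start out holding the_hole so that reads before the initializer has run can be detected at runtime:

enum VariableMode { VAR, LET, CONST, CONST_LEGACY };

bool NeedsHoleInit(VariableMode mode) {
  return mode == LET || mode == CONST || mode == CONST_LEGACY;
}
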
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ str(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ str(result_register(), ContextOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ r2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ mov(r2, Operand(variable->name()));
+ __ mov(r1, Operand(Smi::FromInt(NONE)));
+ __ Push(cp, r2, r1);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ DCHECK(variable->location() == Variable::CONTEXT);
+ DCHECK(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
+ __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ str(r1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ r1,
+ r3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
@@ -896,6 +998,14 @@
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -945,11 +1055,22 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
- __ cmp(r0, Operand(0));
+ Label skip;
+ __ b(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
+ __ cmp(r0, Operand::Zero());
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
@@ -981,15 +1102,15 @@
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@@ -1039,38 +1160,41 @@
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ mov(r2, r0);
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
+ __ cmp(r2, ip);
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
- __ LoadInstanceDescriptors(r0, r1);
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ __ EnumLength(r1, r0);
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ b(eq, &no_descriptors);
+
+ __ LoadInstanceDescriptors(r0, r2);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(r0); // Map.
- __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ jmp(&exit);
+
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
- __ LoadHeapObject(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
+ __ Move(r1, FeedbackVector());
+ __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1095,7 +1219,7 @@
// Get the current entry of the array into register r3.
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));
// Get the expected map from the stack or a smi in the
// permanent slow case into register r2.
@@ -1142,7 +1266,7 @@
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@@ -1156,6 +1280,48 @@
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
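
The emitted code follows the standard iteration protocol. A minimal, self-contained sketch of the control flow, using illustrative C++ types rather than V8's API:

struct IterResult { bool done; int value; };

struct Iterator {
  int i = 0;
  IterResult next() { return { i >= 3, i++ }; }  // stmt->next_result()
};

void ForOfSketch() {
  Iterator it;                      // stmt->assign_iterator()
  for (;;) {
    IterResult result = it.next();  // loop entry: result = iterator.next()
    if (result.done) break;         // stmt->result_done()
    int each = result.value;        // stmt->assign_each()
    (void)each;                     // stmt->body()
  }                                 // back edge bookkeeping, then loop
}
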
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1169,9 +1335,8 @@
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ mov(r0, Operand(info));
- __ push(r0);
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ __ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
@@ -1190,7 +1355,26 @@
}
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ cmp(r0, Operand(isolate()->factory()->undefined_value()));
+ Label done;
+ __ b(ne, &done);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
Register current = cp;
@@ -1200,7 +1384,7 @@
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1213,7 +1397,7 @@
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1223,9 +1407,9 @@
__ Move(next, current);
}
__ bind(&loop);
- // Terminate at global context.
+ // Terminate at native context.
__ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ b(eq, &fast);
// Check that extension is NULL.
@@ -1238,26 +1422,30 @@
__ bind(&fast);
}
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+ }
+
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
- ASSERT(var->IsContextSlot());
+ DCHECK(var->IsContextSlot());
Register context = cp;
Register next = r3;
Register temp = r4;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1280,7 +1468,7 @@
}
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow,
Label* done) {
@@ -1289,19 +1477,19 @@
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
+ Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
@@ -1322,13 +1510,14 @@
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in r2 and the global
- // object (receiver) in r0.
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Comment cmnt(masm_, "[ Global variable");
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+ }
+ CallLoadIC(CONTEXTUAL);
context()->Plug(r0);
break;
}
@@ -1336,16 +1525,15 @@
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
// always looked up dynamically, i.e. in that case var->location() ==
// LOOKUP always holds.
- ASSERT(var->scope() != NULL);
+ DCHECK(var->scope() != NULL);
// Check if the binding really needs an initialization check. The check
// can be skipped in the following situation: we have a LET or CONST
@@ -1368,9 +1556,9 @@
skip_init_check = false;
} else {
// Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+ DCHECK(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1378,7 +1566,7 @@
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1389,7 +1577,7 @@
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ DCHECK(var->mode() == CONST_LEGACY);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
@@ -1401,15 +1589,15 @@
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ bind(&done);
context()->Plug(r0);
}
@@ -1448,13 +1636,12 @@
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(r5);
__ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
+ __ Push(r5, r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ pop(r5);
@@ -1463,7 +1650,7 @@
// r0: Newly allocated regexp.
// r5: Materialized regexp.
// r2: temp.
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
+ __ CopyFields(r0, r5, d0, size / kPointerSize);
context()->Plug(r0);
}
@@ -1480,8 +1667,10 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
@@ -1492,15 +1681,14 @@
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
- __ Push(r3, r2, r1, r0);
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ Push(r3, r2, r1, r0);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1511,9 +1699,9 @@
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1528,39 +1716,47 @@
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(r2, Operand(key->handle()));
- __ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ DCHECK(StoreDescriptor::ValueRegister().is(r0));
+ __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
__ push(r0);
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1582,11 +1778,11 @@
EmitAccessor(it->second->setter);
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
if (expr->has_function()) {
- ASSERT(result_saved);
+ DCHECK(result_saved);
__ ldr(r0, MemOperand(sp));
__ push(r0);
__ CallRuntime(Runtime::kToFastProperties, 1);
@@ -1603,40 +1799,38 @@
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
+ DCHECK_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
- __ Push(r3, r2, r1);
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ mov(r0, Operand(Smi::FromInt(flags)));
+ __ Push(r3, r2, r1, r0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
- ASSERT(constant_elements_kind == FAST_ELEMENTS ||
- constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode = has_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1648,20 +1842,18 @@
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
__ push(r0);
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (constant_elements_kind == FAST_ELEMENTS) {
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ ldr(r6, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
__ str(result_register(), FieldMemOperand(r1, offset));
// Update the write barrier for the array store.
@@ -1669,11 +1861,8 @@
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
__ mov(r3, Operand(Smi::FromInt(i)));
- __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1681,6 +1870,7 @@
}
if (result_saved) {
+ __ pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(r0);
@@ -1689,13 +1879,9 @@
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ DCHECK(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1715,9 +1901,9 @@
break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
+ // We need the receiver both on the stack and in the register.
+ VisitForStackValue(property->obj());
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
}
@@ -1725,9 +1911,10 @@
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
+ VisitForStackValue(property->key());
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1746,11 +1933,11 @@
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1801,21 +1988,374 @@
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::kSuspend:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::kInitial: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r1);
+ __ b(eq, &post_runtime);
+ __ push(r0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::kFinal: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ str(r1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::kDelegating: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
+
+ // Initial send value is undefined.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, r3, r0); // "throw", iter, except
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(r0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(r0); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ ldr(r0, MemOperand(sp, generator_object_depth));
+ __ push(r0); // g
+ DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(r0); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in r0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ bind(&l_next);
+
+ __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, r3, r0); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ __ ldr(load_receiver, MemOperand(sp, kPointerSize));
+ __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
+ }
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(r1, r0);
+ __ str(r1, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ Move(load_receiver, r0);
+
+ __ push(load_receiver); // save result
+ __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
+ }
+ CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ cmp(r0, Operand(0));
+ __ b(eq, &l_try);
+
+ // result.value
+ __ pop(load_receiver); // result
+ __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
+ }
+ CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
+ context()->DropAndPlug(2, r0); // drop iter and g
+ break;
+ }
+ }
+}
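
The delegating case above keeps calling the inner iterator's next() with the last received value and re-yields each result until it reports done; exceptions thrown into the outer generator are forwarded through the iterator's throw method via l_catch. A rough, self-contained sketch of the happy path (illustrative types, not V8 API; exception forwarding omitted):

struct IterResult { bool done; int value; };

struct Iter {
  int i = 0;
  IterResult next(int /*received*/) { return { i >= 2, i++ }; }
};

int DelegateSketch(Iter& iter, int (*yield_)(IterResult)) {
  int received = 0;                          // initial send value (undefined)
  for (;;) {
    IterResult result = iter.next(received); // l_next / l_call
    if (result.done) return result.value;    // l_loop: done => result.value
    received = yield_(result);               // suspend; resume with sent value
  }
}
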
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in r0, and is ultimately read by the resumed generator,
+ // as if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. If the
+ // resumed generator is already closed, the value is instead read in order
+ // to throw it.
+ // r1 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(r1);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ cmp(r3, Operand(Smi::FromInt(0)));
+ __ b(eq, &closed_state);
+ __ b(lt, &wrong_state);
+
+ // Load suspended function and context.
+ __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ push(r2);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+ __ b(mi, &push_frame);
+ __ push(r2);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ bl(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ // lr = return address.
+ // fp = caller's frame pointer.
+ // pp = caller's constant pool (if FLAG_enable_ool_constant_pool),
+ // cp = callee's context,
+ // r4 = callee's JS function.
+ __ PushFixedFrame(r4);
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Load the operand stack size.
+ __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+ __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ SmiUntag(r3);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ cmp(r3, Operand(0));
+ __ b(ne, &slow_resume);
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ if (FLAG_enable_ool_constant_pool) {
+ // Load the new code object's constant pool pointer.
+ __ ldr(pp,
+ MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
+ }
+
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r2);
+ __ add(r3, r3, r2);
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(r3);
+ }
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ sub(r3, r3, Operand(1), SetCC);
+ __ b(mi, &call_resume);
+ __ push(r2);
+ __ b(&push_operand_holes);
+ __ bind(&call_resume);
+ DCHECK(!result_register().is(r1));
+ __ Push(r1, result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ push(r2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ jmp(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(r1);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
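
A minimal sketch of the continuation-state check at the top of EmitGeneratorResume. The exact value of the executing sentinel is an assumption; the STATIC_ASSERTs above only require executing < 0 and closed == 0:

enum ContinuationState { kExecuting = -1, kClosed = 0 /* suspended: > 0 */ };

const char* Classify(int continuation) {
  if (continuation == kClosed) return "closed: return done result or throw";
  if (continuation < 0) return "wrong state: generator already running";
  return "suspended: resume at this code offset";
}
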
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+
+ __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ mov(r1, Operand(map));
+ __ pop(r2);
+ __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
+ DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2,
+ FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ str(r3,
+ FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
+ r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+}
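
A layout sketch of the iterator result object the five stores above fill in. Field order follows the stores, and the DCHECK above pins the instance size at five words; field names are illustrative:

struct IteratorResultLayout {
  void* map;         // HeapObject::kMapOffset
  void* properties;  // JSObject::kPropertiesOffset <- empty_fixed_array
  void* elements;    // JSObject::kElementsOffset   <- empty_fixed_array
  void* value;       // kResultValuePropertyOffset  <- needs write barrier
  void* done;        // kResultDonePropertyOffset   <- true/false, in root set
};
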
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
- // Call load IC. It has arguments receiver and property name r0 and r2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(r0);
+ VisitForStackValue(super_ref->this_var());
+ __ Push(key->value());
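+  // Stack: home_object, receiver, key -- the three arguments of
+  // Runtime::kLoadFromSuper.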
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
+ } else {
+ CallIC(ic, prop->PropertyFeedbackId());
+ }
}
@@ -1841,34 +2381,28 @@
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
  // Smi case. This code works the same way as the smi-smi case in the type
-  // recording binary operation stub, see
-  // BinaryOpStub::GenerateSmiSmiOperation for comments.
+  // recording binary operation stub.
switch (op) {
case Token::SAR:
- __ b(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
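+      // Clear the tag bit, which the arithmetic shift may have set, so the
+      // result remains a valid smi.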
__ bic(right, right, Operand(kSmiTagMask));
break;
case Token::SHL: {
- __ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &stub_call);
- __ SmiTag(right, scratch1);
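+      // TrySmiTag bails out to &stub_call if the result does not fit in a
+      // smi.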
+ __ TrySmiTag(right, scratch1, &stub_call);
break;
}
case Token::SHR: {
- __ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
@@ -1893,7 +2427,7 @@
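+      // The result fits in 32 bits (and hence in a smi) only when the high
+      // word (scratch2) equals the sign extension of the low word.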
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
- __ cmp(scratch1, Operand(0));
+ __ cmp(scratch1, Operand::Zero());
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
@@ -1923,21 +2457,16 @@
Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- BinaryOpStub stub(op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ DCHECK(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1960,25 +2489,22 @@
case NAMED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ mov(r1, r0);
- __ pop(r0); // Restore value.
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ __ Move(StoreDescriptor::ReceiverRegister(), r0);
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- __ pop(r0); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Move(StoreDescriptor::NameRegister(), r0);
+ __ Pop(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -1987,96 +2513,81 @@
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
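+    // r1 holds the context that owns the slot, set up by VarOperand in the
+    // callers.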
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(r2, Operand(var->name()));
- __ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ CallStoreIC();
- } else if (op == Token::INIT_CONST) {
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- __ str(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r3, location);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &assign);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ // Perform the assignment.
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
+ // Assignment to var.
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode())));
__ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &assign);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, r1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.");
+ __ Check(eq, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2086,46 +2597,16 @@
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
+ DCHECK(prop != NULL);
+ DCHECK(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- // Load receiver to r1. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ ldr(r1, MemOperand(sp));
- } else {
- __ pop(r1);
- }
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
+ CallStoreIC(expr->AssignmentFeedbackId());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- __ Drop(1);
- }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -2134,44 +2615,14 @@
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ pop(r1); // Key.
- // Load receiver to r2. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ ldr(r2, MemOperand(sp));
- } else {
- __ pop(r2);
- }
+ __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(r0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CallIC(ic, expr->AssignmentFeedbackId());
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- __ Drop(1);
- }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -2182,13 +2633,20 @@
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), r0);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ EmitNamedSuperPropertyLoad(expr);
+ }
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(r1);
+ __ Move(LoadDescriptor::NameRegister(), r0);
+ __ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(r0);
}
@@ -2196,72 +2654,113 @@
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
- __ Call(code, rmode, ast_id);
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
+ NEVER_INLINE_TARGET_ADDRESS);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
+
+ // Get the target function.
+ if (call_type == CallICState::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ mov(r2, Operand(name));
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
+
+ EmitCall(expr, call_type);
}
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = r1;
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(r0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(r0);
+ __ ldr(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch);
+ __ Push(r0);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - home_object <-- LoadFromSuper will pop here and below.
+ // - this (receiver)
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ str(r0, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(r1);
- __ push(r0);
- __ push(r1);
+ Expression* callee = expr->expression();
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), r0);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2269,11 +2768,17 @@
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
+
+ // Record source position of the IC call.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, flags);
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2282,28 +2787,30 @@
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
+ // r5: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ __ ldr(r5, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
}
- __ push(r1);
- // Push the receiver of the enclosing function.
+  // r4: the enclosing function.
+ __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ // r3: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(r1);
- // Push the language mode.
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
+ __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
- // Push the start position of the scope the calls resides in.
+ // r2: strict mode.
+ __ mov(r2, Operand(Smi::FromInt(strict_mode())));
+
+  // r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(r1);
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
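+  // The caller has already pushed a copy of the function; together with the
+  // five values below this makes six arguments.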
+ __ Push(r5);
+ __ Push(r4, r3, r2, r1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
@@ -2316,12 +2823,11 @@
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2351,35 +2857,34 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ ldr(r0, GlobalObjectOperand());
- __ push(r0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
}
__ bind(&slow);
// Call the runtime to find the function to call (returned in r0)
-    // and the object holding it (returned in edx).
+    // and the object holding it (returned in r1).
- __ push(context_register());
+ DCHECK(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Push(context_register(), r2);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(r0, r1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2393,42 +2898,46 @@
__ push(r0);
// The receiver is implicitly the global receiver. Indicate this
-      // by passing the hole to the call function stub.
+      // by passing undefined to the call function stub.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
__ bind(&call);
}
// The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
+    // by LoadLookupSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitSuperCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
} else {
+ DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
// RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
+ DCHECK(expr->return_is_recorded_);
#endif
}
@@ -2459,21 +2968,17 @@
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(r2, Operand(cell));
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
}
- CallConstructStub stub(flags);
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
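+  // r2 holds the feedback vector and r3 the slot index in which the
+  // construct stub records the call target.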
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
@@ -2482,7 +2987,7 @@
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2494,7 +2999,7 @@
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2503,7 +3008,7 @@
void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2515,7 +3020,7 @@
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+ __ NonNegativeSmiTst(r0);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2524,7 +3029,7 @@
void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2557,7 +3062,7 @@
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2579,7 +3084,7 @@
void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2604,23 +3109,23 @@
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(r0);
+ __ AssertNotSmi(r0);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
__ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
+ __ b(ne, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -2629,67 +3134,70 @@
__ cmp(r2, ip);
__ b(eq, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ LoadInstanceDescriptors(r1, r4);
- __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: descriptor array
- // r3: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ // Look for valueOf name in the descriptor array, and indicate false if
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(r3, r1);
+ __ cmp(r3, Operand::Zero());
+ __ b(eq, &done);
+
+ __ LoadInstanceDescriptors(r1, r4);
+ // r4: descriptor array.
+ // r3: valid entries in the descriptor array.
+ __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
+ __ mul(r3, r3, ip);
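+  // r3: the number of descriptor-array words covered by the valid entries.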
// Calculate location of the first key name.
- __ add(r4,
- r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+ // Calculate the end of the descriptor array.
+ __ mov(r2, r4);
+ __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
+
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- // The use of ip to store the valueOf symbol asumes that it is not otherwise
+ // string "valueOf" the result is false.
+ // The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_symbol()));
+ __ mov(ip, Operand(isolate()->factory()->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(r3, MemOperand(r4, 0));
__ cmp(r3, ip);
__ b(eq, if_false);
- __ add(r4, r4, Operand(kPointerSize));
+ __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmp(r4, Operand(r2));
__ b(ne, &loop);
- // If a valueOf property is not found on the object check that it's
+ __ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype; if not, the result is
+  // false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
__ JumpIfSmi(r2, if_false);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2709,9 +3217,35 @@
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
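+  // Minus zero is the only heap number whose exponent word is 0x80000000
+  // and whose mantissa word is zero.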
+ __ cmp(r2, Operand(0x80000000));
+ __ cmp(r1, Operand(0x00000000), eq);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2733,7 +3267,7 @@
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2755,7 +3289,7 @@
void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
+ DCHECK(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2768,14 +3302,11 @@
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
- __ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -2787,7 +3318,7 @@
void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
// Load the two objects into registers and perform the comparison.
VisitForStackValue(args->at(0));
@@ -2811,22 +3342,22 @@
void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
-  // ArgumentsAccessStub expects the key in edx and the formal
+  // ArgumentsAccessStub expects the key in r1 and the formal
// parameter count in r0.
VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(r0);
}
void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label exit;
+ DCHECK(expr->arguments()->length() == 0);
+
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2834,20 +3365,18 @@
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &exit);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
- __ bind(&exit);
context()->Plug(r0);
}
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
Label done, null, function, non_function_constructor;
VisitForAccumulatorValue(args->at(0));
@@ -2887,12 +3416,12 @@
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kFunction_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@@ -2906,86 +3435,11 @@
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP3)) {
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(VFP3);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
+ DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@@ -2996,9 +3450,9 @@
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
+ DCHECK(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@@ -3010,7 +3464,7 @@
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
@@ -3018,8 +3472,7 @@
__ JumpIfSmi(r0, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq);
__ bind(&done);
context()->Plug(r0);
@@ -3028,26 +3481,25 @@
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ DCHECK(args->length() == 2);
+ DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = r0;
Register result = r0;
Register scratch0 = r9;
Register scratch1 = r1;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+  __ JumpIfSmi(object, &not_date_object);
__ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ Assert(eq, "Trying to get date field from non-date.");
-#endif
+  __ b(ne, &not_date_object);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3064,31 +3516,99 @@
__ PrepareCallCFunction(2, scratch1);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+
+  __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(r0);
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r0;
+ Register index = r1;
+ Register value = r2;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ __ Pop(index, value);
+
+ if (FLAG_debug_code) {
+ __ SmiTst(value);
+ __ Check(eq, kNonSmiValue);
+ __ SmiTst(index);
+ __ Check(eq, kNonSmiIndex);
+ __ SmiUntag(index, index);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value, value);
+ __ add(ip,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
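+  // The index is still a smi; shifting right by kSmiTagSize untags it as
+  // part of the address computation.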
+ __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r0;
+ Register index = r1;
+ Register value = r2;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ __ Pop(index, value);
+
+ if (FLAG_debug_code) {
+ __ SmiTst(value);
+ __ Check(eq, kNonSmiValue);
+ __ SmiTst(index);
+ __ Check(eq, kNonSmiIndex);
+ __ SmiUntag(index, index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value, value);
+ __ add(ip,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
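+  // A smi index is the character index shifted left by one, which is
+  // exactly the byte offset of a two-byte character.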
+ __ strh(value, MemOperand(ip, index));
+ context()->Plug(string);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP3)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(r0);
}
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(r1); // r0 = value. r1 = object.
@@ -3116,11 +3636,11 @@
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ DCHECK_EQ(args->length(), 1);
+ // Load the argument into r0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3128,7 +3648,7 @@
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label done;
@@ -3146,7 +3666,7 @@
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3191,7 +3711,7 @@
void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3219,7 +3739,7 @@
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3238,11 +3758,12 @@
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
+ DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ __ pop(r1);
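+  // r0: right operand (from the accumulator); r1: left operand.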
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3250,77 +3771,19 @@
void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
+ DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
+ DCHECK(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
for (int i = 0; i < arg_count + 1; i++) {
@@ -3328,20 +3791,20 @@
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
- __ b(eq, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ __ b(ne, &runtime);
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(r0);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3351,12 +3814,14 @@
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
+ DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(r1);
+ __ pop(r2);
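+  // r2: first argument; r1: second argument; r0: third argument.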
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3364,14 +3829,14 @@
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ DCHECK_EQ(2, args->length());
+ DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
context()->Plug(r0);
return;
@@ -3381,21 +3846,19 @@
Register key = r0;
Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
__ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ ldr(cache,
FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
- // tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
// r2 now holds finger offset as a smi.
__ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+ __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
// Note side effect of PreIndex: r3 now points to the key of the pair.
__ cmp(key, r2);
  __ b(ne, &not_found);
@@ -3413,47 +3876,6 @@
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = r0;
- Register left = r1;
- Register tmp = r2;
- Register tmp2 = r3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, Operand(right));
- __ b(eq, &ok);
- // Fail if either is a non-HeapObject.
- __ and_(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &fail);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &fail);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ b(eq, &ok);
- __ bind(&fail);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3476,12 +3898,10 @@
void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(r0);
- }
+ __ AssertString(r0);
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
@@ -3490,13 +3910,12 @@
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+ Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3511,25 +3930,24 @@
Register string = r4;
Register element = r5;
Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
+ Register scratch = r9;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
+ __ CheckFastElements(scratch, array_length, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length, SetCC);
__ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
__ b(&done);
__ bind(&non_trivial_array);
@@ -3539,9 +3957,9 @@
__ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
array = no_reg; // End of array's live range.
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand(0));
+ __ mov(string_length, Operand::Zero());
__ add(element,
elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
@@ -3553,18 +3971,18 @@
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
- __ cmp(array_length, Operand(0));
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ if (generate_debug_code_) {
+ __ cmp(array_length, Operand::Zero());
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1));
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
+ __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -3583,25 +4001,25 @@
// string_length: Sum of string lengths (smi).
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// a smi but the other values are, so the result is a smi.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch));
+ __ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ cmp(ip, Operand(0));
+ __ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2));
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
@@ -3616,12 +4034,10 @@
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch,
+ string, // used as scratch
+ elements_end, // used as scratch
+ &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
@@ -3630,11 +4046,11 @@
array_length = no_reg;
__ add(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -3649,17 +4065,19 @@
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
+ DCHECK(result.is(r0));
__ b(&done);
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ // Replace separator with its one-byte character value.
+ __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3669,7 +4087,7 @@
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
+ // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
__ strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3679,11 +4097,13 @@
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
+ DCHECK(result.is(r0));
__ b(&done);
// Long separator case (separator is more than one character). Entry is at the
@@ -3700,18 +4120,20 @@
__ SmiUntag(string_length);
__ add(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
+ DCHECK(result.is(r0));
__ b(&done);
__ bind(&bailout);
@@ -3721,9 +4143,20 @@
}
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ __ mov(ip, Operand(debug_is_active));
+ __ ldrb(r0, MemOperand(ip));
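+ // The flag is a single byte; smi-tag it so the JS caller sees 0 or 1.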
+ __ SmiTag(r0);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -3731,34 +4164,56 @@
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ push(r0);
- }
-
- // Push the arguments ("left-to-right").
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ mov(r2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ // Push the builtins object as the receiver.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ __ ldr(receiver, GlobalObjectOperand());
+ __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+ __ push(receiver);
+
+ // Load the function from the receiver.
+ __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+ }
+
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
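+ // The receiver is duplicated on top; r0 (the loaded function) overwrites
+ // the old copy beneath it.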
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, r0);
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(r0);
}
- context()->Plug(r0);
}
@@ -3772,9 +4227,7 @@
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ mov(r1, Operand(Smi::FromInt(strict_mode())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -3782,11 +4235,11 @@
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ DCHECK(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -3797,10 +4250,10 @@
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ DCHECK(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ Push(context_register(), r2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
context()->Plug(r0);
}
} else {
@@ -3838,7 +4291,7 @@
// for control and plugging the control flow into the context,
// because we need to prepare a pair of extra administrative AST ids
// for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
Label materialize_true, materialize_false, done;
VisitForControl(expr->expression(),
&materialize_false,
@@ -3868,60 +4321,18 @@
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ DCHECK(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -3936,7 +4347,7 @@
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
@@ -3946,15 +4357,16 @@
__ push(ip);
}
if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ push(r0);
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
+ VisitForStackValue(prop->key());
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
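+ // Object and key stay on the stack; copies go into the IC's
+ // descriptor registers.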
EmitKeyedPropertyLoad(prop);
}
}
@@ -3964,15 +4376,47 @@
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(r0, &no_conversion);
- ToNumberStub convert_stub;
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(r0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
+ __ b(vc, &done);
+ // Call stub. Undo operation first.
+ __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
+ __ jmp(&stub_call);
+ __ bind(&slow);
+ }
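+ // Not a smi (or the smi case is not inlined): convert the operand to a
+ // number before counting.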
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3995,29 +4439,16 @@
}
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(r0, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- }
- __ mov(r1, Operand(Smi::FromInt(count_value)));
+ __ bind(&stub_call);
+ __ mov(r1, r0);
+ __ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4044,12 +4475,10 @@
}
break;
case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4061,12 +4490,11 @@
break;
}
case KEYED_PROPERTY: {
- __ pop(r1); // Key.
- __ pop(r2); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4082,30 +4510,34 @@
void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
+ DCHECK(!context()->IsEffect());
+ DCHECK(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Comment cmnt(masm_, "[ Global variable");
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+ }
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
__ mov(r0, Operand(proxy->name()));
__ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4132,13 +4564,14 @@
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@@ -4146,16 +4579,16 @@
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(r0, if_false);
+ __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
@@ -4165,19 +4598,17 @@
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(r0, if_false);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- __ b(eq, if_true);
- }
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
+ __ b(eq, if_true);
// Check for JS objects => true.
__ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ b(lt, if_false);
@@ -4225,7 +4656,7 @@
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
@@ -4236,29 +4667,7 @@
default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cond = eq;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cond = CompareIC::ComputeCondition(op);
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4274,11 +4683,11 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
}
@@ -4301,28 +4710,18 @@
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
- __ cmp(r0, r1);
if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r1, nil_value);
+ __ cmp(r0, r1);
Split(eq, if_true, if_false, fall_through);
} else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ b(eq, if_true);
- __ LoadRoot(r1, other_nil_value);
- __ cmp(r0, r1);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // It can be an undetectable object.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ cmp(r0, Operand(0));
+ Split(ne, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -4345,7 +4744,7 @@
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
}
@@ -4357,8 +4756,9 @@
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4369,7 +4769,7 @@
// code. Fetch it from the context.
__ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
- ASSERT(declaration_scope->is_function_scope());
+ DCHECK(declaration_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
@@ -4380,26 +4780,68 @@
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
+ DCHECK(!result_register().is(r1));
// Store result register while executing finally block.
__ push(result_register());
// Cook return address in link register to stack (smi encoded Code* delta)
__ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ SmiTag(r1);
+
+ // Store the cooked return address while executing the finally block.
+ __ push(r1);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ ldr(r1, MemOperand(ip));
+ __ push(r1);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
+ __ ldrb(r1, MemOperand(ip));
+ __ SmiTag(r1);
+ __ push(r1);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ ldr(r1, MemOperand(ip));
__ push(r1);
}
void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
+ DCHECK(!result_register().is(r1));
+ // Restore pending message from stack.
+ __ pop(r1);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ str(r1, MemOperand(ip));
+
+ __ pop(r1);
+ __ SmiUntag(r1);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
+ __ strb(r1, MemOperand(ip));
+
+ __ pop(r1);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ str(r1, MemOperand(ip));
+
// Restore result register from stack.
__ pop(r1);
+
// Uncook return address and return.
__ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ SmiUntag(r1);
__ add(pc, r1, Operand(masm_->CodeObject()));
}
@@ -4434,6 +4876,146 @@
#undef __
+
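+// Walk back from |pc| (which points just past the <blx ip> of a back-edge
+// check) to the first instruction of the sequence that loads the interrupt
+// handler address into ip.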
+static Address GetInterruptImmediateLoadAddress(Address pc) {
+ Address load_address = pc - 2 * Assembler::kInstrSize;
+ if (!FLAG_enable_ool_constant_pool) {
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+ } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
+ // This is an extended constant pool lookup.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ load_address -= 2 * Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
+ DCHECK(Assembler::IsMovT(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
+ } else {
+ load_address -= 4 * Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
+ }
+ } else if (CpuFeatures::IsSupported(ARMv7) &&
+ Assembler::IsMovT(Memory::int32_at(load_address))) {
+ // This is a movw / movt immediate load.
+ load_address -= Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
+ } else if (!CpuFeatures::IsSupported(ARMv7) &&
+ Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
+ // This is a mov / orr immediate load.
+ load_address -= 3 * Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
+ } else {
+ // This is a small constant pool lookup.
+ DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+ }
+ return load_address;
+}
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
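+ // Only one instruction is rewritten: the conditional branch (or nop) that
+ // immediately precedes the load sequence.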
+ CodePatcher patcher(branch_address, 1);
+ switch (target_state) {
+ case INTERRUPT:
+ {
+ // <decrement profiling counter>
+ // bpl ok
+ // ; load interrupt stub address into ip - either of (for ARMv7):
+ // ; <small cp load>      | <extended cp load> | <immediate load>
+ // ldr ip, [pc/pp, #imm]  | movw ip, #imm      | movw ip, #imm
+ //                        | movt ip, #imm      | movt ip, #imm
+ //                        | ldr ip, [pp, ip]   |
+ // ; or (for ARMv6):
+ // ; <small cp load>      | <extended cp load> | <immediate load>
+ // ldr ip, [pc/pp, #imm]  | mov ip, #imm       | mov ip, #imm
+ //                        | orr ip, ip, #imm   | orr ip, ip, #imm
+ //                        | orr ip, ip, #imm   | orr ip, ip, #imm
+ //                        | orr ip, ip, #imm   | orr ip, ip, #imm
+ // blx ip
+ // <reset profiling counter>
+ // ok-label
+
+ // Calculate the branch offset to the ok-label: the difference between the
+ // branch address and |pc| (which points just past the <blx ip>), plus the
+ // length of the profiling counter reset sequence.
+ int branch_offset = pc - Instruction::kPCReadOffset - branch_address +
+ kProfileCounterResetSequenceLength;
+ patcher.masm()->b(branch_offset, pl);
+ break;
+ }
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // mov r0, r0 (NOP)
+ // ; load on-stack replacement address into ip - either of (for ARMv7):
+ // ; <small cp load>      | <extended cp load> | <immediate load>
+ // ldr ip, [pc/pp, #imm]  | movw ip, #imm      | movw ip, #imm
+ //                        | movt ip, #imm      | movt ip, #imm
+ //                        | ldr ip, [pp, ip]   |
+ // ; or (for ARMv6):
+ // ; <small cp load>      | <extended cp load> | <immediate load>
+ // ldr ip, [pc/pp, #imm]  | mov ip, #imm       | mov ip, #imm
+ //                        | orr ip, ip, #imm   | orr ip, ip, #imm
+ //                        | orr ip, ip, #imm   | orr ip, ip, #imm
+ //                        | orr ip, ip, #imm   | orr ip, ip, #imm
+ // blx ip
+ // <reset profiling counter>
+ // ok-label
+ patcher.masm()->nop();
+ break;
+ }
+
+ // Replace the call address.
+ Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ DCHECK(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize)));
+
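+ // The instruction before the load distinguishes the states: a conditional
+ // branch means the interrupt check is still live; a nop means the site was
+ // patched for on-stack replacement.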
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
+ Address interrupt_address = Assembler::target_address_at(
+ pc_immediate_load_address, unoptimized_code);
+
+ if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
+ DCHECK(interrupt_address ==
+ isolate->builtins()->InterruptCheck()->entry());
+ return INTERRUPT;
+ }
+
+ DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));
+
+ if (interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ DCHECK(interrupt_address ==
+ isolate->builtins()->OsrAfterStackCheck()->entry());
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
deleted file mode 100644
index c88c257..0000000
--- a/src/arm/ic-arm.cc
+++ /dev/null
@@ -1,1767 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "assembler-arm.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register t0,
- Register t1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // elements: holds the property dictionary on fall through.
- // Scratch registers:
- // t0: used to hold the receiver map.
- // t1: used to hold the receiver instance type, receiver bit mask and
- // elements map.
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, t1, miss);
-
- // Check that the global object does not require access checks.
- __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ b(ne, miss);
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(t1, ip);
- __ b(ne, miss);
-}
-
-
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
- __ b(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects works as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
- //
- // scratch2 - used to hold the loaded value.
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(hs, out_of_range);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(scratch2,
- MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, out_of_range);
- __ mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
- // The key is not a smi.
- // Is it a string?
- __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
- __ b(ge, not_symbol);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
-
- // Is the string a symbol?
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ tst(hash, Operand(kIsSymbolMask));
- __ b(eq, not_symbol);
-}
-
-
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r1 : receiver
- // -- r2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(r1, &number);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &boolean);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // r1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(r1, miss);
-
- // Check that the value is a JSFunction.
- __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
-
- // r0: elements
- // Search the dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
-
- GenerateFunctionTailCall(masm, argc, &miss, r4);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
- }
-
- // Get the receiver of the function from the stack.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
-
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ JumpIfSmi(r2, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
-
- __ bind(&do_call);
- // receiver in r1 is not used after this point.
- // r2: key
- // r1: function
- GenerateFunctionTailCall(masm, argc, &slow_call, r0);
-
- __ bind(&check_number_dictionary);
- // r2: key
- // r3: elements map
- // r4: elements
- // Check whether the elements is a number dictionary.
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow_load);
- __ mov(r0, Operand(r2, ASR, kSmiTagSize));
- // r0: untagged index
- __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- }
- __ mov(r1, r0);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
-
- // The key is known to be a symbol.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(r3, r2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
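
The megamorphic keyed-call stub above tries progressively more generic strategies before giving up. A compact restatement of the routing, with a stand-in enum and flags; the real decisions are the map and key inspections emitted above:

    #include <cstdio>

    // Hash-cached index strings re-enter the smi path, mirroring the
    // index_string -> index_smi jump in the generated code.
    enum class KeyKind { kSmi, kIndexString, kSymbol, kOther };

    const char* DispatchKeyedCall(KeyKind key, bool fast_elements,
                                  bool dictionary_properties) {
      if (key == KeyKind::kIndexString) key = KeyKind::kSmi;
      switch (key) {
        case KeyKind::kSmi:
          return fast_elements ? "fast array load"
                               : "number dictionary or slow load";
        case KeyKind::kSymbol:
          return dictionary_properties ? "inline dictionary probe"
                                       : "monomorphic cache probe";
        default:
          return "slow_call, ending in GenerateMiss";
      }
    }

    int main() {
      std::printf("%s\n", DispatchKeyedCall(KeyKind::kSymbol, true, false));
    }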
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(r2, &miss);
- __ IsObjectJSStringType(r2, r0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r0, r2, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
-
- // r1: elements
- GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
-
- __ mov(r3, r0);
- __ Push(r3, r2);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, slow_case);
-
- // Check that the key is a positive smi.
- __ tst(key, Operand(0x80000001));
- __ b(ne, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
- __ b(cs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, key, scratch3);
- __ add(scratch3, scratch3, Operand(kOffset));
-
- __ ldr(scratch2, MemOperand(scratch1, scratch3));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, scratch3);
- __ b(eq, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, scratch2, scratch3);
- __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- return MemOperand(scratch1, scratch3);
-}
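
Two details above are easy to miss: a single tst against 0x80000001 checks the smi tag bit and the sign bit at once, and the slot offset is computed directly from the still-tagged key. A standalone sketch under the 32-bit tagging assumptions this code relies on (1-bit smi tag, kHeapObjectTag == 1, 4-byte pointers, 8-byte FixedArray header):

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 4;
    const int kFixedArrayHeaderSize = 8;  // map + length
    const int kHeapObjectTag = 1;

    // "tst key, #0x80000001" passes only when the smi tag bit (0) and the
    // sign bit (31) are both clear, i.e. the key is a non-negative smi.
    bool IsPositiveSmi(uint32_t tagged) { return (tagged & 0x80000001u) == 0; }

    // The first two FixedArray slots hold the context and the backing
    // store, so entries start two slots past the header. A tagged smi is
    // value << 1, hence key * (kPointerSize >> 1) == value * kPointerSize.
    int MappedEntryOffset(uint32_t tagged_key) {
      assert(IsPositiveSmi(tagged_key));
      return static_cast<int>(tagged_key * (kPointerSize >> 1)) +
             kFixedArrayHeaderSize + 2 * kPointerSize - kHeapObjectTag;
    }

    int main() {
      assert(MappedEntryOffset(0u << 1) == 15);  // 8 + 2*4 - 1
      assert(MappedEntryOffset(3u << 1) == 27);  // three slots further on
    }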
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
- __ b(cs, slow_case);
- __ mov(scratch, Operand(kPointerSize >> 1));
- __ mul(scratch, key, scratch);
- __ add(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
- __ ldr(r0, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r2.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
- __ ldr(r2, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &slow);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, false);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
- __ str(r0, mapped_location);
- __ add(r6, r3, r5);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
- __ str(r0, unmapped_location);
- __ add(r6, r3, r4);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, false);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
- __ ldr(r1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
- __ ldr(r1, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, r3);
- __ b(eq, &slow);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
-
- __ Push(r1, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref = force_generic
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = r0;
- Register receiver = r1;
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r2, r3, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r0: key
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize));
- __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1, r2, r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(r3, r3, Operand(mask));
-
- // Load the key (consisting of map and symbol) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r4, Operand(cache_keys));
- __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r4 to next entry.
- __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
- __ cmp(r2, r5);
- __ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
- __ cmp(r0, r5);
- __ b(eq, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r4 to symbol.
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
- __ cmp(r2, r5);
- __ b(ne, &slow);
- __ ldr(r5, MemOperand(r4));
- __ cmp(r0, r5);
- __ b(ne, &slow);
-
- // Get field offset.
- // r0 : key
- // r1 : receiver
- // r2 : receiver's map
- // r3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r4, Operand(cache_field_offsets));
- if (i != 0) {
- __ add(r3, r3, Operand(i));
- }
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ add(r6, r6, r5); // Index from start of object.
- __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r1: receiver
- // r0: key
- // r3: elements
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, r2, r3);
- __ Ret();
-
- __ bind(&index_string);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
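
The keyed lookup cache probe above hashes the receiver's map word together with the string's hash field, masks the result, and then tries each entry in the bucket. A standalone model of that probe; the shift, mask, and bucket-size constants are stand-ins, not KeyedLookupCache's real values:

    #include <cstdint>
    #include <cstdio>

    const int kMapHashShift = 5;        // assumed
    const int kHashShift = 2;           // assumed
    const int kEntriesPerBucket = 2;
    const uint32_t kMask = 0x3f;        // stand-in for capacity & hash mask

    struct CacheKey { uint32_t map_word; uint32_t symbol; };

    // A hit returns the cached property offset (the generated code then
    // decides between an in-object and a property-array load); a miss
    // falls through to the slow path.
    int ProbeLookupCache(const CacheKey* keys, const int* field_offsets,
                         uint32_t map_word, uint32_t hash_field,
                         uint32_t symbol) {
      uint32_t index =
          ((map_word >> kMapHashShift) ^ (hash_field >> kHashShift)) & kMask;
      for (int i = 0; i < kEntriesPerBucket; i++) {
        if (keys[index + i].map_word == map_word &&
            keys[index + i].symbol == symbol) {
          return field_offsets[index + i];
        }
      }
      return -1;
    }

    int main() {
      CacheKey keys[kMask + kEntriesPerBucket] = {};
      int offsets[kMask + kEntriesPerBucket] = {};
      uint32_t map = 0x1234, hash = 0x88, sym = 42;
      uint32_t idx = ((map >> kMapHashShift) ^ (hash >> kHashShift)) & kMask;
      keys[idx] = {map, sym};
      offsets[idx] = 7;
      std::printf("offset = %d\n",
                  ProbeLookupCache(keys, offsets, map, hash, sym));
    }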
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key (index)
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Register receiver = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, false);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
- __ b(ne, &slow);
-
- // Get the map of the receiver.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
- __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(r1, r0); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm, false);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- ExternalReference ref = force_generic
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
-
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
-
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, array, extra, check_if_double_array;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label transition_smi_elements, finish_object_store, non_double_value;
- Label transition_double_elements;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- Register receiver_map = r3;
- Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
- // r4 and r5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
- // Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ cmp(r4, Operand(JS_ARRAY_TYPE));
- __ b(eq, &array);
- // Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(lo, &fast_object_with_map_check);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r0: value.
- // r1: key.
- // r2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ b(ne, &slow); // Only support writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &slow);
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &check_if_double_array);
- // Calculate key + 1 as smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r4, key, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast_object_without_map_check);
-
- __ bind(&check_if_double_array);
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- // Add 1 to key, and go to common element store code for doubles.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r4, key, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ jmp(&fast_double_without_map_check);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &extra);
- // Fall through to fast case.
-
- __ bind(&fast_object_with_map_check);
- Register scratch_value = r4;
- Register address = r5;
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements,
- r3,
- r4,
- r5,
- r6,
- &transition_double_elements);
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- r4,
- &slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- &slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
- // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- &slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
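
The transition labels at the end of the store stub encode a small elements-kind lattice: smi-only arrays widen to double or object storage depending on the incoming value, and double arrays fall back to object storage for non-numbers. A stand-in restatement (V8's real ElementsKind enum has more members):

    #include <cassert>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS,
                        FAST_ELEMENTS };

    ElementsKind KindAfterStore(ElementsKind current, bool value_is_smi,
                                bool value_is_heap_number) {
      if (value_is_smi) return current;  // a smi fits every fast kind
      if (current == FAST_SMI_ONLY_ELEMENTS) {
        // transition_smi_elements: heap numbers widen to DOUBLE,
        // anything else forces the generic FAST_ELEMENTS kind.
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      }
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number) {
        return FAST_ELEMENTS;  // transition_double_elements
      }
      return current;  // FAST_ELEMENTS absorbs everything
    }

    int main() {
      assert(KindAfterStore(FAST_SMI_ONLY_ELEMENTS, false, true) ==
             FAST_DOUBLE_ELEMENTS);
      assert(KindAfterStore(FAST_DOUBLE_ELEMENTS, false, false) ==
             FAST_ELEMENTS);
    }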
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // The receiver is already in r1; probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
-
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except external arrays, i.e. anything whose elements
- // are of FixedArray type). The value must be a number; only smis are
- // handled here, as they are the most common case.
-
- Label miss;
-
- Register receiver = r1;
- Register value = r0;
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
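
The stub above is only a gatekeeper; the length update itself happens in StoreIC_ArrayLength. A boolean restatement of its guards, with a stand-in struct instead of V8's object model:

    struct ReceiverFacts {
      bool is_smi;
      bool is_js_array;
      bool elements_are_fixed_array;   // rules out external arrays
      bool properties_are_hash_table;  // "length" may have been redefined
    };

    bool CanStoreArrayLengthFast(const ReceiverFacts& r, bool value_is_smi) {
      return !r.is_smi && r.is_js_array && r.elements_are_fixed_array &&
             !r.properties_are_hash_table && value_is_smi;
    }

    int main() {
      ReceiverFacts plain_array{false, true, true, false};
      return CanStoreArrayLengthFast(plain_array, true) ? 0 : 1;
    }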
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
-
- GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(),
- 1, r4, r5);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r1, r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address());
- }
-}
-
-
-void PatchInlinedSmiCode(Address address) {
- Address cmp_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- if (!Assembler::IsCmpImmediate(instr)) {
- return;
- }
-
- // The delta to the start of the map check instruction and the
- // condition code used at the patched jump.
- int delta = Assembler::GetCmpImmediateRawImmediate(instr);
- delta +=
- Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
- // If the delta is 0 the instruction is cmp r0, #0 which also signals that
- // nothing was inlined.
- if (delta == 0) {
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- address, cmp_instruction_address, delta);
- }
-#endif
-
- Address patch_address =
- cmp_instruction_address - delta * Instruction::kInstrSize;
- Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
- ASSERT(Assembler::IsCmpRegister(instr_at_patch));
- ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- ASSERT(Assembler::IsBranch(branch_instr));
- if (Assembler::GetCondition(branch_instr) == eq) {
- // This is patching a "jump if not smi" site to be active.
- // Changing
- // cmp rx, rx
- // b eq, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne, <target>
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- patcher.EmitCondition(ne);
- } else {
- ASSERT(Assembler::GetCondition(branch_instr) == ne);
- // This is patching a "jump if smi" site to be active.
- // Changing
- // cmp rx, rx
- // b ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b eq, <target>
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- patcher.EmitCondition(eq);
- }
-}
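
The patch-site protocol above hides the distance back to the map check inside an otherwise dead cmp: the 12-bit immediate carries the low bits and the register code extends the range. A sketch of the decoding, reusing the assembler's kOff12Mask value:

    #include <cassert>

    const int kOff12Mask = (1 << 12) - 1;  // 0xfff, the 12-bit immediate mask

    // Mirrors GetCmpImmediateRawImmediate plus register-code scaling above;
    // the result counts instructions back from the cmp site.
    int DecodeDelta(int raw_immediate, int register_code) {
      return raw_immediate + register_code * kOff12Mask;
    }

    int main() {
      assert(DecodeDelta(0, 0) == 0);               // "cmp r0, #0": nothing inlined
      assert(DecodeDelta(5, 1) == 5 + kOff12Mask);  // r1 extends past 12 bits
    }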
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
new file mode 100644
index 0000000..9bbc1f5
--- /dev/null
+++ b/src/arm/interface-descriptors-arm.cc
@@ -0,0 +1,323 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return r1; }
+const Register LoadDescriptor::NameRegister() { return r2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return r1; }
+const Register StoreDescriptor::NameRegister() { return r2; }
+const Register StoreDescriptor::ValueRegister() { return r0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
+
+
+const Register InstanceofDescriptor::left() { return r0; }
+const Register InstanceofDescriptor::right() { return r1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return r1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return r2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r2, r1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r2, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the function to call
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, r0, r1, r2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // r0 -- number of arguments
+ // r1 -- function
+ // r2 -- allocation site with elements kind
+ Register registers[] = {cp, r1, r2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // Stack param count: the constructor pointer plus a single argument.
+ Register registers[] = {cp, r1, r2, r0};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // r0 -- number of arguments
+ // r1 -- constructor function
+ Register registers[] = {cp, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // Stack param count: the constructor pointer plus a single argument.
+ Register registers[] = {cp, r1, r0};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r1, // JSFunction
+ r0, // actual number of arguments
+ r2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r0, // callee
+ r4, // call_data
+ r2, // holder
+ r1, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
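
Every descriptor in this new file follows the same shape: a static register list, always led by the context register cp, handed to the descriptor data, optionally with per-parameter representations. A standalone sketch of the pattern with stand-in types; the register codes and the store-descriptor ordering shown are assumptions for illustration:

    #include <cstddef>
    #include <cstdio>

    typedef int Register;  // stand-in for v8::internal::Register
    const Register r0 = 0, r1 = 1, r2 = 2, cp = 7;  // cp's code assumed

    struct DescriptorData {
      const Register* registers;
      size_t register_count;
      void Initialize(size_t count, const Register* regs) {
        registers = regs;
        register_count = count;
      }
    };

    // Hypothetical store-descriptor registration mirroring the tables
    // above: context, then receiver (r1), name (r2), value (r0).
    void InitializeStoreDescriptor(DescriptorData* data) {
      static const Register regs[] = {cp, r1, r2, r0};
      data->Initialize(sizeof(regs) / sizeof(regs[0]), regs);
    }

    int main() {
      DescriptorData data;
      InitializeStoreDescriptor(&data);
      std::printf("%zu registers\n", data.register_count);
    }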
diff --git a/src/arm/interface-descriptors-arm.h b/src/arm/interface-descriptors-arm.h
new file mode 100644
index 0000000..6201adc
--- /dev/null
+++ b/src/arm/interface-descriptors-arm.h
@@ -0,0 +1,26 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
+#define V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor {
+ public:
+ explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) {}
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index c3dd1cb..13a46a2 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium-allocator-inl.h"
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
+#include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -41,54 +18,28 @@
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as temporaries and
// outputs because all registers are blocked by the calling convention.
// Inputs operands must use a fixed register or use-at-start policy or
// a non-register policy.
- ASSERT(Output() == NULL ||
+ DCHECK(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
+ DCHECK(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
for (TempIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+ DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
}
}
#endif
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -108,22 +59,21 @@
}
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- for (int i = 0; i < inputs_.length(); i++) {
+ for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- inputs_[i]->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- for (int i = 0; i < results_.length(); i++) {
- if (i > 0) stream->Add(" ");
- results_[i]->PrintTo(stream);
- }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
}
@@ -182,6 +132,7 @@
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -192,6 +143,11 @@
}
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -199,81 +155,72 @@
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- InputAt(0)->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(0)->PrintTo(stream);
- InputAt(1)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -283,79 +230,85 @@
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
stream->Add("#%d / ", arity());
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[r2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
-
stream->Add(" length ");
length()->PrintTo(stream);
-
stream->Add(" index ");
index()->PrintTo(stream);
}
@@ -363,9 +316,9 @@
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
+ OStringStream os;
+ os << hydrogen()->access() << " <- ";
+ stream->Add(os.c_str());
value()->PrintTo(stream);
}
@@ -373,27 +326,41 @@
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ DCHECK(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -412,148 +379,38 @@
}
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot when allocating a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index);
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
+ DCHECK(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
}
}
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
+LPlatformChunk* LChunkBuilder::Build() {
+ DCHECK(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
-}
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
- } else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -566,21 +423,6 @@
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -659,6 +501,11 @@
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -676,8 +523,7 @@
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -685,40 +531,35 @@
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
+ LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
+ LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -726,44 +567,24 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
- }
-
// If instruction does not have side-effects lazy deoptimization
// after the call will try to deoptimize to the point before the call.
// Thus we still need to attach environment to this call even if
@@ -773,21 +594,17 @@
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+    // We cannot statically tell whether this environment will be needed,
+    // so conservatively mark it as used.
+ instr->environment()->set_has_been_used();
}
return instr;
}
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- instr->MarkAsSaveDoubles();
- return instr;
-}
-
-
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ DCHECK(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -795,22 +612,39 @@
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
+ DCHECK(operand->HasFixedPolicy());
return operand;
}
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
+ DCHECK(operand->HasFixedPolicy());
return operand;
}
@@ -820,8 +654,14 @@
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
}
@@ -832,85 +672,83 @@
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
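+      // JS shift counts are taken modulo 32, so only the low five bits of
+      // the constant matter.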
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
- bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+    // In addition, a logical shift right (SHR) by zero deoptimizes if its
+    // uint32 result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
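+    // Double modulo is implemented with a C library call, so the operands
+    // and the result are pinned to the call's fixed registers.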
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
+ DCHECK(left->representation().IsTagged());
+ DCHECK(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
+ DCHECK(is_building());
current_block_ = block;
next_block_ = next_block;
if (block->IsStartBlock()) {
@@ -919,13 +757,13 @@
} else if (block->predecessors()->length() == 1) {
// We have a single predecessor => copy environment and outgoing
// argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
+ DCHECK(block->phis()->length() == 0);
HBasicBlock* pred = block->predecessors()->at(0);
HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
+ DCHECK(last_environment != NULL);
// Only copy the environment, if it is later used again.
if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
+ DCHECK(pred->end()->FirstSuccessor() == block);
} else {
if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@@ -933,7 +771,7 @@
}
}
block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
+ DCHECK(pred->argument_count() >= 0);
argument_count_ = pred->argument_count();
} else {
// We are at a state join => process phis.
@@ -942,11 +780,15 @@
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
}
for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
}
block->UpdateEnvironment(last_environment);
// Pick up the outgoing argument count of one of the predecessors.
@@ -975,95 +817,142 @@
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
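+  // Instructions whose results are known to be unused are replaced with
+  // dummy uses, which keep their operands alive for the register allocator
+  // without generating real code.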
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ DCHECK(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ DCHECK(argument_count_ >= 0);
if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- result->AddValue(op, value->representation());
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ DCHECK(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
}
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
- return result;
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment.
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
- return AssignEnvironment(result);
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
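+  // Branches that may need a map check, or that cover only a subset of the
+  // generic ToBoolean cases, can deoptimize and therefore need an
+  // environment.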
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new(zone()) LCmpMapAndBranch(value, temp);
@@ -1071,20 +960,23 @@
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new(zone()) LArgumentsLength(value));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0),
+ UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1092,8 +984,10 @@
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
- FixedTemp(r4));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), r0),
+ FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1102,7 +996,7 @@
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1119,10 +1013,30 @@
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
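+  // Emit one LPushArgument per operand; HPushArguments itself produces no
+  // value, so there is no result instruction to return.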
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1134,125 +1048,208 @@
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
-}
+ if (instr->HasNoUses()) return NULL;
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
+ return DefineAsRegister(new(zone()) LContext);
}
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ CallInterfaceDescriptor descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
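+  // The call target is the first operand; each remaining argument is pinned
+  // to the register named by the descriptor.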
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else if (op == kMathPowHalf) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, d2);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor:
+ return DoMathFloor(instr);
+ case kMathRound:
+ return DoMathRound(instr);
+ case kMathFround:
+ return DoMathFround(instr);
+ case kMathAbs:
+ return DoMathAbs(instr);
+ case kMathLog:
+ return DoMathLog(instr);
+ case kMathExp:
+ return DoMathExp(instr);
+ case kMathSqrt:
+ return DoMathSqrt(instr);
+ case kMathPowHalf:
+ return DoMathPowHalf(instr);
+ case kMathClz32:
+ return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
- LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempDoubleRegister();
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFround* result = new (zone()) LMathFround(input);
+ return DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
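+  // Only the generic (tagged) case calls into the runtime and needs the
+  // context and a pointer map; any non-double input can deoptimize (kMinInt
+  // overflow or a non-number input), hence the environment.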
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = TempDoubleRegister();
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+ return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), r1);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
- instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, r0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
}
@@ -1272,119 +1269,284 @@
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
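+  // Deopt on -0, on kMinInt / -1 overflow, or when a non-truncating use
+  // could observe an inexact power-of-two quotient.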
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
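+  // Without hardware integer division (SUDIV), the quotient is computed in
+  // the VFP unit, which requires a double-width temp.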
+ LOperand* temp =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers r0
- // and r1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
}
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- LModI* mod;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d10),
- FixedTemp(d11));
- }
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- return AssignEnvironment(DefineAsRegister(mod));
- } else {
- return DefineAsRegister(mod);
- }
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
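+  // A temp is only needed when the quotient can be negative, in which case
+  // the truncated result must be corrected towards -infinity.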
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LOperand* temp2 =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor, temp, temp2));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
}
}
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
- temp = TempRegister();
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can be optimized if the result can overflow.
+ // For other constants, it can be optimized only without overflow.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
+ if (instr->HasOneUse() && (instr->uses().value()->IsAdd() ||
+ instr->uses().value()->IsSub())) {
+ HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
+ if (use->IsAdd() && instr == use->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded into a
+ // multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsSub()) {
+ // This mul is the rhs of a sub. The sub and mul will be folded into a
+ // multiply-sub in DoSub.
+ return NULL;
+ }
+ }
+
+ return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1392,9 +1554,15 @@
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+ if (instr->left()->IsConstant()) {
+ // If lhs is constant, do reverse subtraction instead.
+ return DoRSub(instr);
+ }
+
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1404,6 +1572,10 @@
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+ return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
+ }
+
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
@@ -1411,79 +1583,146 @@
}
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+ // Note: The lhs of the subtraction becomes the rhs of the
+ // reverse-subtraction.
+ LOperand* left = UseRegisterAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->left());
+ LRSubI* rsb = new(zone()) LRSubI(left, right);
+ LInstruction* result = DefineAsRegister(rsb);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
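Reverse subtraction pays off because ARM's RSB instruction computes operand2 - Rn directly, so a constant lhs needs no extra register move. A rough sketch (illustration only):

    // Hydrogen:  r = 5 - y                 (constant on the left)
    // Lithium:   LRSubI(left = y, right = #5)
    // ARM:       rsb r_r, r_y, #5          ; r_r = 5 - y in one instruction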
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+ LOperand* minuend_op = UseRegisterAtStart(minuend);
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+ return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
+ multiplier_op,
+ multiplicand_op));
+}
+
+
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ DCHECK(instr->left()->representation().IsExternal());
+ DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+ }
+
+ if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+ DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
  // We need to use a fixed result register for the call.
Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d2) :
- UseFixed(instr->right(), r2);
+ DCHECK(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d1)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3),
+ return MarkAsCall(DefineFixedDouble(result, d2),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, d7), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(r));
+ DCHECK(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
+ DCHECK(r.IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@@ -1496,21 +1735,23 @@
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
+ return new(zone()) LCmpHoleAndBranch(value);
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new(zone()) LIsObjectAndBranch(value, temp);
@@ -1518,7 +1759,7 @@
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new(zone()) LIsStringAndBranch(value, temp);
@@ -1526,14 +1767,14 @@
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
return new(zone()) LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
}
@@ -1541,19 +1782,20 @@
LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LHasInstanceTypeAndBranch(value);
}
@@ -1561,7 +1803,7 @@
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
@@ -1570,7 +1812,7 @@
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
return new(zone()) LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
@@ -1578,49 +1820,62 @@
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return new(zone()) LClassOfTestAndBranch(value, TempRegister());
}
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
}
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result = new LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr);
+ LDateField* result =
+ new(zone()) LDateField(object, FixedTemp(r1), instr->index());
+ return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new(zone()) LSeqStringSetChar(context, string, index, value);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
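The two FLAG_debug_code tests above encode a small decision table; summarizing (editorial sketch, not part of the upstream change):

    // skip_check  debug_code  lowering
    // true        false       nothing emitted (check statically removed)
    // true        true        LBoundsCheck emitted without an environment,
    //                         i.e. a debug-mode assertion that cannot deopt
    // false       any         LBoundsCheck emitted with a deopt environment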
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
}
@@ -1631,12 +1886,6 @@
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1653,66 +1902,94 @@
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- LInstruction* res = NULL;
- if (!needs_check) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
- } else {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
- : NULL;
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2,
- temp3));
- res = AssignEnvironment(res);
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
- return res;
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
- return AssignEnvironment(DefineAsRegister(res));
+ DCHECK(to.IsInteger32());
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
}
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
} else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ DCHECK(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+ }
}
}
UNREACHABLE();
@@ -1720,9 +1997,19 @@
}
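A background note on the Smi paths in DoChange, assuming the 32-bit Smi encoding used on ARM (sketch, not part of the upstream change):

    // A Smi stores the integer shifted left by one, with low bit 0 as the tag:
    //   smi   = value << 1;   // LSmiTag
    //   value = smi >> 1;     // LSmiUntag
    // Tagging overflows when the value needs more than 31 bits, i.e. lies
    // outside [-2^30, 2^30 - 1]; that is why the kCanOverflow cases above
    // must AssignEnvironment so they can deoptimize.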
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
@@ -1733,30 +2020,21 @@
}
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMap(value);
- return AssignEnvironment(result);
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
}
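HasMigrationTarget turns a failing map check from an immediate bailout into an attempted repair; roughly (editorial summary of the surrounding design):

    // If one of the maps is a migration target, a failing LCheckMaps first
    // tries to migrate the deprecated object to its up-to-date map via a
    // runtime call, deoptimizing only if migration fails. The runtime call
    // can allocate (and thus GC), so the instruction needs a pointer map
    // and the code is marked as making deferred calls.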
@@ -1765,30 +2043,54 @@
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ DCHECK(input_rep.IsSmiOrTagged());
-    // Register allocator doesn't (yet) support allocation of double
-    // temps. Reserve d1 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
+ LClampTToUint8* result =
+ new(zone()) LClampTToUint8(reg, TempDoubleRegister());
return AssignEnvironment(DefineAsRegister(result));
}
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ DCHECK(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), r0));
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
+ parameter_count);
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -1807,8 +2109,15 @@
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ }
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object, vector);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1823,20 +2132,14 @@
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r1);
- LOperand* value = UseFixed(instr->value(), r0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1851,36 +2154,30 @@
value = UseRegister(instr->value());
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, r0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ }
+
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
return MarkAsCall(result, instr);
}
@@ -1892,264 +2189,255 @@
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* elements = UseTempRegister(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_typed_elements()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ DCHECK(instr->representation().IsSmiOrTagged());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ } else {
+ DCHECK(
+ (instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ }
+
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
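The environment condition for external/typed-array loads reflects the uint32 range problem (editorial sketch, not part of the upstream change):

    // A UINT32 element can hold values up to 2^32 - 1, but Integer32 is a
    // signed representation. Unless the instruction carries the kUint32
    // flag, a loaded value >= 2^31 is unrepresentable and must trigger a
    // deopt, hence AssignEnvironment for the UINT32 element kinds above.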
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r1);
- LOperand* key = UseFixed(instr->key(), r0);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ }
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+ r0);
return MarkAsCall(result, instr);
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_typed_elements()) {
+ DCHECK(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ DCHECK(instr->value()->representation().IsSmiOrTagged());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
+ DCHECK(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
-
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ DCHECK((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r2);
- LOperand* key = UseFixed(instr->key(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->object()->representation().IsTagged());
+ DCHECK(instr->key()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
- return DefineSameAsFirst(result);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
} else {
LOperand* object = UseFixed(instr->object(), r0);
- LOperand* fixed_object_reg = FixedTemp(r2);
- LOperand* new_map_reg = FixedTemp(r3);
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
+ return MarkAsCall(result, instr);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map = instr->has_transition() &&
+ instr->NeedsWriteBarrierForMap();
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
- return new(zone()) LStoreNamedField(obj, val);
+  // We need a temporary register for the write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
- instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister());
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr);
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2157,24 +2445,42 @@
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ DCHECK(info()->IsStub());
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Retry(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
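The spill-index computation keys OSR values to the unoptimized frame's layout; a worked example with assumed indices (illustration only):

    // Say the unoptimized frame has two parameters and
    // environment()->first_local_index() == 2. Then env_index == 3 (the
    // second local) yields spill_index = 3 - 2 = 1, while parameter
    // indices instead map through GetParameterStackSlot(), so the
    // optimized frame subsumes the unoptimized slots one-for-one.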
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
}
@@ -2187,13 +2493,20 @@
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2205,13 +2518,14 @@
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
}
@@ -2222,58 +2536,39 @@
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- instruction_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
- return result;
- }
-
+ instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
}
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
- ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ DCHECK(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->is_construct());
- if (instr->arguments() != NULL) {
- inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+ instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
+ inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2281,32 +2576,35 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ DCHECK(instr->argument_delta() == -argument_count);
+ }
+
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return pop;
}
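The LDrop above balances arguments that the inlined frame materialized; a small worked case (illustration only):

    // If the inlined call pushed its arguments (arguments_pushed()) and the
    // arguments environment has parameter_count() == 3, leaving the inlined
    // scope emits LDrop(3), matching the DCHECK that argument_delta() == -3.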
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
}
@@ -2319,9 +2617,26 @@
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
}
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
} } // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 62cde6e..f9feaf6 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_LITHIUM_ARM_H_
#define V8_ARM_LITHIUM_ARM_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium.h"
+#include "src/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -40,155 +17,166 @@
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(AllocateObject) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMap) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
- V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(Allocate) \
+ V(AllocateBlockContext) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckInstanceType) \
+ V(CheckNonSmi) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFround) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(MultiplyAddD) \
+ V(MultiplySubD) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(RSubI) \
+ V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
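+// FINAL and OVERRIDE are V8's portability macros: they expand to the C++11
+// 'final' and 'override' keywords on toolchains that support them, and to
+// nothing otherwise.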
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ DCHECK(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -198,20 +186,21 @@
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false),
- is_save_doubles_(false) { }
- virtual ~LInstruction() { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) = 0;
- virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
enum Opcode {
// Declare a unique enum value for each instruction.
@@ -235,6 +224,9 @@
virtual bool IsControl() const { return false; }
+ // Try deleting this instruction if possible.
+ virtual bool TryDelete() { return false; }
+
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
bool HasEnvironment() const { return environment_ != NULL; }
@@ -246,45 +238,67 @@
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
- void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
- bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
+ virtual LOperand* result() const = 0;
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
#ifdef DEBUG
void VerifyCall();
#endif
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
private:
+ // Iterator support.
+ friend class InputIterator;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
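+  // IsCallBits encodes the is-call flag in bit 0 of bit_field_.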
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
- bool is_call_;
- bool is_save_doubles_;
+ int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const FINAL OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
};
@@ -292,31 +306,22 @@
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
protected:
- EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -327,10 +332,10 @@
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
+ DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
}
@@ -347,8 +352,10 @@
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
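+  // Parallel moves are allocated with placement new in the compilation
+  // zone rather than on the C++ heap.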
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -362,30 +369,35 @@
};
-class LInstructionGap: public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+ return !IsRedundant();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -401,23 +413,45 @@
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ LDummy() {}
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LLabel: public LGap {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ virtual bool IsControl() const OVERRIDE { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
Label* label() { return &label_; }
LLabel* replacement() const { return replacement_; }
void set_replacement(LLabel* label) { replacement_ = label; }
@@ -429,47 +463,100 @@
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
+  LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
- virtual bool IsControl() const { return true; }
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
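+  // TrueLabel/FalseLabel lazily look up the assembly labels of the two
+  // successor blocks via the chunk and cache the result.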
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -477,13 +564,14 @@
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -504,7 +592,7 @@
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -518,292 +606,605 @@
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- LArgumentsElements() { }
-
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 3> {
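+// Mod/div variants whose divisor is a compile-time constant are split into
+// dedicated instructions so the code generator can emit cheaper shift and
+// multiply sequences instead of a generic division.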
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = minuend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* minuend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ explicit LMathFloor(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
};
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
- explicit LIsNilAndBranch(LOperand* value) {
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -811,33 +1212,53 @@
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
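+  // Environment used to register the lazy bailout for the call emitted in
+  // deferred code.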
+ LEnvironment* lazy_deopt_env_;
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -848,16 +1269,20 @@
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
@@ -865,7 +1290,7 @@
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -874,7 +1299,8 @@
}
Token::Value op() const { return op_; }
-
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
@@ -885,19 +1311,37 @@
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LRSubI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LRSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -906,7 +1350,16 @@
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -915,187 +1368,173 @@
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LJSArrayLength(LOperand* value) {
+ explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
};
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
private:
Smi* index_;
};
-class LSetDateField: public LTemplateInstruction<1, 2, 1> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index)
- : index_(index) {
- inputs_[0] = date;
- inputs_[1] = value;
- temps_[0] = temp;
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
- int index() const { return index_; }
-
- private:
- int index_;
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1104,497 +1543,582 @@
}
Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
+ LOperand* value() { return inputs_[0]; }
+
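+  // The parameter count is a constant when the function's arity is known
+  // statically; otherwise it is passed in as a register value.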
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ DCHECK(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ temps_[0] = vector;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ temps_[0] = vector;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* global_object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- LOperand* global_object() { return InputAt(0); }
- Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LOuterContext(LOperand* context) {
+ explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+ LOperand* function() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
-};
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-
- LOperand* global() { return InputAt(0); }
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
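+ // The operand count depends on the call descriptor rather than on template
+ // parameters, so the inputs live in a ZoneList and the iterator hooks
+ // below are overridden to walk it.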
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ CallInterfaceDescriptor descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- LOperand* function() { return inputs_[0]; }
-
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
- }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- LOperand* function() { return inputs_[0]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LNumberTagI(LOperand* value) {
+ explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
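+// Tags an int32 as a Smi; when the value does not fit in a Smi, a deferred
+// path allocates a heap number, using the two temps as scratch registers.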
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1603,343 +2127,341 @@
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp,
+ LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
+ LOperand* value() { return inputs_[0]; }
bool needs_check() const { return needs_check_; }
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
private:
bool needs_check_;
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamedField(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
+ bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
}
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
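+
+ // Results of add/sub/mul/div never need canonicalization: the ARM port
+ // runs VFP in default-NaN mode, so arithmetic already yields the canonical
+ // NaN bit pattern.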
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* context,
+ LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
}
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
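+
+ // new_map_temp is only used for simple transitions that rewrite the map
+ // word in place; transitions that must relocate the backing store go
+ // through a stub and take the context instead.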
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
+};
+
+
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
};
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
- LOperand* value() { return InputAt(0); }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMap(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
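+ // value stays NULL for map-stability checks, which emit no code and
+ // instead register a code dependency on the checked maps.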
- DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
- DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -1948,10 +2470,10 @@
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -1960,154 +2482,171 @@
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LAllocateObject(LOperand* temp1, LOperand* temp2) {
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+ LOperand* context() { return inputs_[0]; }
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
+ LOsrEntry() {}
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+ LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2118,33 +2657,21 @@
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
inputs_[1] = object;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2160,7 +2687,7 @@
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2174,7 +2701,7 @@
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2188,112 +2715,89 @@
};
-class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
}
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
+ LOperand* context() { return inputs_[0]; }
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
};
-class LChunkBuilder BASE_EMBEDDED {
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->isolate()->zone()),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ allocator_(allocator) {}
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+ LInstruction* DoRSub(HSub* instr);
+
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathFround(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
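+
+ // Integer division, modulus and flooring division each lower to different
+ // sequences depending on whether the divisor is a power of two, another
+ // constant, or only known at run time.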
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* format, ...);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2330,33 +2834,31 @@
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand that is a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in a register, a stack slot, or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2369,37 +2871,21 @@
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
- LInstruction* SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id);
- void ClearInstructionPendingDeoptimizationEnvironment();
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
- LChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 82b80a2..a06ed73 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1,42 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "arm/lithium-codegen-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
+#include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +25,11 @@
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -62,132 +43,158 @@
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
+ LPhase phase("Z_Code generation", chunk());
+ DCHECK(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope1(VFP3);
- CpuFeatures::Scope scope2(ARMv7);
-
- CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
+ DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
+void LCodeGen::SaveCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
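+ // Only the double registers the allocator actually used are spilled, each
+ // at a fixed offset from sp; RestoreCallerDoubles() mirrors this layout.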
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
}
- status_ = ABORTED;
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
+void LCodeGen::RestoreCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
}
bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
+ DCHECK(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
+ // r1: Callee's JS function.
+ // cp: Callee's context.
+ // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ ldr(r2, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+
+ __ str(r2, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
+ }
}
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ mov(r0, Operand(slots));
- __ mov(r2, Operand(kSlotsZapValue));
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+ __ push(r0);
+ __ push(r1);
+ __ add(r0, sp, Operand(slots * kPointerSize));
+ __ mov(r1, Operand(kSlotsZapValue));
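+ // r0 and r1 were pushed above, so each reserved slot sits two words above
+ // the address in r0; the 2 * kPointerSize offset in the store compensates.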
Label loop;
__ bind(&loop);
- __ push(r2);
- __ sub(r0, r0, Operand(1), SetCC);
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ str(r1, MemOperand(r0, 2 * kPointerSize));
+ __ cmp(r0, sp);
__ b(ne, &loop);
+ __ pop(r1);
+ __ pop(r0);
} else {
__ sub(sp, sp, Operand(slots * kPointerSize));
}
}
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r1.
- __ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
+ __ push(r1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved on the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ mov(cp, r0);
+ __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -201,53 +208,96 @@
MemOperand target = ContextOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp,
+ target.offset(),
+ r0,
+ r3,
+ GetLinkRegisterState(),
+ kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
}
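Since FastNewContextStub allocates in new space, the store of r0 into the context's slot can never create an old-to-new pointer, so the barrier is skipped and debug builds merely assert the assumption. A hedged sketch of the guard (the helper names are stand-ins, not V8 API):

    // Sketch of a generational write-barrier guard.
    static bool InNewSpace(const void* /*object*/) { return false; }  // stub
    static void RememberSlot(void* /*object*/, void** /*slot*/) {}    // stub

    void StoreWithBarrier(void* object, void** slot, void* value) {
      *slot = value;
      // Only stores into old-space objects can create old-to-new
      // pointers the GC must track; a holder known to be in new space
      // needs no record, which is the case skipped above.
      if (!InNewSpace(object)) RememberSlot(object, slot);
    }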
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ DCHECK(slots >= 0);
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+}
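The prologue only grows the frame by the slots the unoptimized frame does not already provide. A sketch of the arithmetic, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // On OSR entry the unoptimized frame is already on the stack, so
    // only the difference is reserved.
    uintptr_t OsrAdjustSp(uintptr_t sp, int optimized_slots,
                          int unoptimized_slots, int pointer_size) {
      int extra = optimized_slots - unoptimized_slots;
      assert(extra >= 0);  // mirrors the DCHECK above
      return sp - static_cast<uintptr_t>(extra) * pointer_size;
    }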
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
}
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
}
bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
+ DCHECK(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ DCHECK(!frame_is_built_);
+ DCHECK(info()->IsStub());
+ frame_is_built_ = true;
+ __ PushFixedFrame();
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ Comment(";;; Deferred code");
+ }
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ DCHECK(frame_is_built_);
+ __ pop(ip);
+ __ PopFixedFrame();
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -260,7 +310,7 @@
}
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
// Check that the jump table is accessible from everywhere in the function
// code, i.e. that offsets to the table can be encoded in the 24bit signed
// immediate of a branch instruction.
@@ -269,23 +319,87 @@
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 2)) {
- Abort("Generated code is too large");
+ jump_table_.length() * 7)) {
+ Abort(kGeneratedCodeIsTooLarge);
}
- // Block the constant pool emission during the jump table emission.
- __ BlockConstPoolFor(deopt_jump_table_.length());
- __ RecordComment("[ Deoptimisation jump table");
- Label table_start;
- __ bind(&table_start);
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ bind(&deopt_jump_table_[i].label);
- __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
- __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ if (jump_table_.length() > 0) {
+ Label needs_frame, call_deopt_entry;
+
+ Comment(";;; -------------------- Jump table --------------------");
+ Address base = jump_table_[0].address;
+
+ Register entry_offset = scratch0();
+
+ int length = jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load an immediate
+ // offset which will be added to the base address later.
+ __ mov(entry_offset, Operand(entry - base));
+
+ if (table_entry->needs_frame) {
+ DCHECK(!info()->saves_caller_doubles());
+ if (needs_frame.is_bound()) {
+ __ b(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ Comment(";;; call deopt with frame");
+ __ PushFixedFrame();
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(ip);
+ __ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ bind(&call_deopt_entry);
+ // Add the base address to the offset previously loaded in
+ // entry_offset.
+ __ add(entry_offset, entry_offset,
+ Operand(ExternalReference::ForDeoptEntry(base)));
+ __ blx(entry_offset);
+ }
+
+ masm()->CheckConstPool(false, false);
+ } else {
+ // The last entry can fall through into `call_deopt_entry`, avoiding a
+ // branch.
+ bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+ if (need_branch) __ b(&call_deopt_entry);
+
+ masm()->CheckConstPool(false, !need_branch);
+ }
+ }
+
+ if (!call_deopt_entry.is_bound()) {
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
+
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ add(entry_offset, entry_offset,
+ Operand(ExternalReference::ForDeoptEntry(base)));
+ __ blx(entry_offset);
+ }
}
- ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
- deopt_jump_table_.length() * 2);
- __ RecordComment("]");
+
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
// The deoptimization jump table is the last part of the instruction
// sequence. Mark the generated code as done unless we bailed out.
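Each table entry therefore costs only a handful of instructions (the 7-instruction budget checked above): a mov of the small offset plus a branch, with the expensive base rematerialization shared by every entry. A sketch of the resulting dispatch, under the assumption that deopt entries are laid out contiguously from a common base:

    #include <cstdint>

    // Each entry records only (entry_address - base) as a small
    // immediate; one shared tail adds the base back and branches.
    using DeoptEntry = void (*)();

    void DispatchDeopt(uintptr_t base, uint32_t entry_offset) {
      DeoptEntry entry = reinterpret_cast<DeoptEntry>(base + entry_offset);
      entry();  // corresponds to the final blx through entry_offset
    }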
@@ -295,7 +409,7 @@
bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
+ DCHECK(is_done());
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -306,13 +420,13 @@
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DwVfpRegister::FromAllocationIndex(index);
}
Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
+ DCHECK(op->IsRegister());
return ToRegister(op->index());
}
@@ -322,23 +436,20 @@
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
+ DCHECK(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ mov(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
+ DCHECK(r.IsSmiOrTagged());
+ __ Move(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ ldr(scratch, ToMemOperand(op));
return scratch;
}
@@ -347,33 +458,34 @@
}
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ DCHECK(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch) {
if (op->IsDoubleRegister()) {
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
+ DCHECK(literal->IsNumber());
__ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
__ vmov(flt_scratch, ip);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
return dbl_scratch;
} else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+ Abort(kUnsupportedDoubleImmediate);
} else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
+ Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(op));
MemOperand mem_op = ToMemOperand(op);
@@ -386,84 +498,107 @@
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
}
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ DCHECK(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
}
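On 32-bit ARM a Smi is the integer shifted left by one tag bit, which is why the tagged bit pattern can be produced with a reinterpret_cast. A sketch of that encoding, assuming the 32-bit layout (one tag bit, tag value 0); it does not apply to 64-bit Smi layouts:

    #include <cassert>
    #include <cstdint>

    int32_t SmiTag32(int32_t value) {
      assert(value >= -(1 << 30) && value < (1 << 30));  // must fit 31 bits
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }

    int32_t SmiUntag32(int32_t tagged) {
      return tagged >> 1;  // arithmetic shift restores the sign
    }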
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Operand(static_cast<int32_t>(literal->Number()));
+ if (r.IsSmi()) {
+ DCHECK(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ DCHECK(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
+ Abort(kToOperandUnsupportedDoubleImmediate);
}
- ASSERT(r.IsTagged());
- return Operand(literal);
+ DCHECK(r.IsTagged());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand(0);
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand::Zero();
}
// Stack slots not implemented, use ToMemOperand instead.
UNREACHABLE();
- return Operand(0);
+ return Operand::Zero();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+ DCHECK(index < 0);
+ return -(index + 1) * kPointerSize;
}
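Parameter indices are negative here, with -1 naming the argument closest to the stack pointer. A worked check of the mapping, assuming 4-byte pointers:

    // index -1 -> sp + 0, index -2 -> sp + 4, index -3 -> sp + 8, ...
    constexpr int OffsetOf(int index) { return -(index + 1) * 4; }
    static_assert(OffsetOf(-1) == 0, "first parameter sits at sp");
    static_assert(OffsetOf(-2) == 4, "next parameter is one slot up");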
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
} else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
+ // No eager frame has been built, so retrieve the parameter relative
+ // to the stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
}
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ DCHECK(op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
} else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ // No eager frame has been built, so retrieve the parameter relative
+ // to the stack pointer.
+ return MemOperand(
+ sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
}
}
@@ -473,12 +608,17 @@
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -486,63 +626,91 @@
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ DCHECK(translation_size == 1);
+ DCHECK(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ DCHECK(translation_size == 2);
+ DCHECK(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
+
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false);
- }
- }
-
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
- bool is_tagged) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject();
- } else if (op->IsStackSlot()) {
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -550,8 +718,8 @@
DoubleRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -559,21 +727,34 @@
}
+int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
+ int size = masm()->CallSize(code, mode);
+ if (code->kind() == Code::BINARY_OP_IC ||
+ code->kind() == Code::COMPARE_IC) {
+ size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
+ }
+ return size;
+}
+
+
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+ LInstruction* instr,
+ TargetAddressStorageMode storage_mode) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- SafepointMode safepoint_mode) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ Call(code, mode);
+ SafepointMode safepoint_mode,
+ TargetAddressStorageMode storage_mode) {
+ DCHECK(instr != NULL);
+ // Block literal pool emission to ensure that the nop indicating no
+ // inlined smi code is in the correct position.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
@@ -587,20 +768,36 @@
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
+ DCHECK(instr != NULL);
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -609,6 +806,7 @@
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -631,76 +829,139 @@
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail,
+ Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
+ DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ DCHECK(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- return;
+ // Store the condition on the stack if necessary.
+ if (condition != al) {
+ __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
+ __ mov(scratch, Operand(1), LeaveCC, condition);
+ __ push(scratch);
+ }
+
+ __ push(r1);
+ __ mov(scratch, Operand(count));
+ __ ldr(r1, MemOperand(scratch));
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
+ __ str(r1, MemOperand(scratch));
+ __ pop(r1);
+
+ if (condition != al) {
+ // Clean up the stack before the deoptimizer call.
+ __ pop(scratch);
+ }
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
+
+ // 'Restore' the condition in a slightly hacky way. (It would be better
+ // to use 'msr' and 'mrs' instructions here, but they are not supported by
+ // our ARM simulator).
+ if (condition != al) {
+ condition = ne;
+ __ cmp(scratch, Operand::Zero());
+ }
}
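The instrumentation above keeps a countdown in stress_deopt_count: each deopt site decrements it, and when it hits zero the code deopts and reloads the counter. A sketch of that policy:

    #include <cstdint>

    // Decrement a shared counter; when it reaches zero, force a deopt
    // and reload the counter from FLAG_deopt_every_n_times.
    bool ShouldForceDeopt(int32_t* counter, int32_t every_n) {
      if (--*counter == 0) {
        *counter = every_n;
        return true;  // matches the eq-conditional Call(entry, ...)
      }
      return false;
    }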
- if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+ if (info()->ShouldTrapOnDeopt()) {
+ __ stop("trap_on_deopt", condition);
+ }
- if (cc == al) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
+ DCHECK(info()->IsStub() || frame_is_built_);
+ // Go through jump table if we need to handle condition, build frame, or
+ // restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
+ DeoptComment(reason);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry));
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ b(cc, &deopt_jump_table_.last().label);
+ __ b(condition, &jump_table_.last().label);
}
}
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(condition, instr, detail, bailout_type);
+}
+
+
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
}
- data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -715,13 +976,13 @@
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
+ DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
@@ -741,7 +1002,7 @@
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
} else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
@@ -753,7 +1014,7 @@
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
+ DCHECK(expected_safepoint_kind_ == kind);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -761,14 +1022,14 @@
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
+ safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
+ // Register pp always contains a pointer to the constant pool.
+ safepoint.DefinePointerRegister(pp, zone());
}
}
@@ -780,7 +1041,7 @@
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -793,27 +1054,26 @@
}
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
}
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
}
void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@@ -847,42 +1107,21 @@
void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ SubStringStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
+ StringCompareStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
@@ -893,293 +1132,529 @@
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ DCHECK(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
+ HMod* hmod = instr->hydrogen();
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ cmp(dividend, Operand::Zero());
+ __ b(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ rsb(dividend, dividend, Operand::Zero());
+ __ and_(dividend, dividend, Operand(mask));
+ __ rsb(dividend, dividend, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr);
+ }
+ __ b(&done);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
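A sketch of the branching remainder computed above, assuming mask == Abs(divisor) - 1. Like JS, the remainder takes the sign of the dividend; a zero result from a negative dividend is the -0 case that triggers the deopt above:

    #include <cstdint>

    int32_t ModPowerOf2(int32_t dividend, int32_t mask) {
      if (dividend >= 0) return dividend & mask;
      // Correct even for kMinInt: the unsigned negation wraps, but the
      // mask extracts the same low bits.
      uint32_t neg = 0u - static_cast<uint32_t>(dividend);
      return -static_cast<int32_t>(neg & static_cast<uint32_t>(mask));
    }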
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr);
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ smull(result, ip, result, ip);
+ __ sub(result, dividend, result, SetCC);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ b(ne, &remainder_not_zero);
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr);
+ __ bind(&remainder_not_zero);
+ }
}
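The constant-divisor remainder is recovered from the truncating quotient via x - (x / d) * d, which the smull/sub pair implements. A sketch with plain division standing in for the magic-number TruncatingDiv; the divisor's sign is irrelevant for the remainder, which is why the code divides by Abs(divisor):

    #include <cstdint>
    #include <cstdlib>

    // Assumes divisor != 0 and divisor != kMinInt.
    int32_t ModByConst(int32_t dividend, int32_t divisor) {
      int32_t d = std::abs(divisor);
      int32_t quotient = dividend / d;  // truncating division
      return dividend - quotient * d;   // the smull/sub pair above
    }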
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
+ HMod* hmod = instr->hydrogen();
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ cmp(dividend, Operand(0));
- __ b(pl, &positive_dividend);
- __ rsb(result, dividend, Operand(0));
- __ and_(result, result, Operand(divisor - 1), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ Label done;
+ // Check for x % 0, sdiv might signal an exception. We have to deopt in this
+ // case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr);
}
- __ rsb(result, result, Operand(0));
- __ b(&done);
- __ bind(&positive_dividend);
- __ and_(result, dividend, Operand(divisor - 1));
+
+ // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, Operand(kMinInt));
+ __ b(ne, &no_overflow_possible);
+ __ cmp(right_reg, Operand(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr);
+ } else {
+ __ b(ne, &no_overflow_possible);
+ __ mov(result_reg, Operand::Zero());
+ __ jmp(&done);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ // For 'r3 = r1 % r2' we can have the following ARM code:
+ // sdiv r3, r1, r2
+ // mls r3, r3, r2, r1
+
+ __ sdiv(result_reg, left_reg, right_reg);
+ __ Mls(result_reg, result_reg, right_reg, left_reg);
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ cmp(left_reg, Operand::Zero());
+ DeoptimizeIf(lt, instr);
+ }
__ bind(&done);
+
+ } else {
+ // General case, without any SDIV support.
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = scratch0();
+ DCHECK(!scratch.is(left_reg));
+ DCHECK(!scratch.is(right_reg));
+ DCHECK(!scratch.is(result_reg));
+ DwVfpRegister dividend = ToDoubleRegister(instr->temp());
+ DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
+ DCHECK(!divisor.is(dividend));
+ LowDwVfpRegister quotient = double_scratch0();
+ DCHECK(!quotient.is(dividend));
+ DCHECK(!quotient.is(divisor));
+
+ Label done;
+ // Check for x % 0, we have to deopt in this case because we can't return a
+ // NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr);
+ }
+
+ __ Move(result_reg, left_reg);
+ // Load the arguments in VFP registers. The divisor value is preloaded
+ // before. Be careful that 'right_reg' is only live on entry.
+ // TODO(svenpanne) The last comment seems to be wrong nowadays.
+ __ vmov(double_scratch0().low(), left_reg);
+ __ vcvt_f64_s32(dividend, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right_reg);
+ __ vcvt_f64_s32(divisor, double_scratch0().low());
+
+ // We do not care about the sign of the divisor. Note that we still handle
+ // the kMinInt % -1 case correctly, though.
+ __ vabs(divisor, divisor);
+ // Compute the quotient and round it to a 32bit integer.
+ __ vdiv(quotient, dividend, divisor);
+ __ vcvt_s32_f64(quotient.low(), quotient);
+ __ vcvt_f64_s32(quotient, quotient.low());
+
+ // Compute the remainder in result.
+ __ vmul(double_scratch0(), divisor, quotient);
+ __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
+ __ vmov(scratch, double_scratch0().low());
+ __ sub(result_reg, left_reg, scratch, SetCC);
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ b(ne, &done);
+ __ cmp(left_reg, Operand::Zero());
+ DeoptimizeIf(mi, instr);
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+ DCHECK(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr);
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr);
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ tst(dividend, Operand(mask));
+ DeoptimizeIf(ne, instr);
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ rsb(result, dividend, Operand(0));
+ return;
+ }
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ mov(result, dividend);
+ } else if (shift == 1) {
+ __ add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ mov(result, Operand(dividend, ASR, 31));
+ __ add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+}
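The shift sequence rounds toward zero rather than toward negative infinity by biasing negative dividends with divisor - 1 first; shift == 1 can take the bias straight from the sign bit, while the general case materializes the sign word and shifts it, exactly as the LSR-based instruction pair does. A sketch of the general case, for divisor == 1 << shift with shift >= 1:

    #include <cstdint>

    int32_t DivPowerOf2(int32_t dividend, int shift) {
      int32_t sign = dividend >> 31;  // 0 or -1
      // bias is divisor - 1 for negative dividends, 0 otherwise.
      uint32_t bias = static_cast<uint32_t>(sign) >> (32 - shift);
      return (dividend + static_cast<int32_t>(bias)) >> shift;
    }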
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr);
return;
}
- // These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
- DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
- DwVfpRegister quotient = double_scratch0();
-
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
-
- Label done, vfp_modulo, both_positive, right_negative;
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
- DeoptimizeIf(eq, instr->environment());
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr);
}
- __ Move(result, left);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand(0));
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
-
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &both_positive);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
-
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand(0));
-
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(ip, Operand(divisor));
+ __ smull(scratch0(), ip, result, ip);
+ __ sub(scratch0(), scratch0(), dividend, SetCC);
+ DeoptimizeIf(ne, instr);
}
-
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value is preloaded before. Be careful that 'right' is only live
- // on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated (for example
- // to scratch2).
- right = no_reg;
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
-
- // We do not care about the sign of the divisor.
- __ vabs(divisor, divisor);
- // Compute the quotient and round it to a 32bit integer.
- __ vdiv(quotient, dividend, divisor);
- __ vcvt_s32_f64(quotient.low(), quotient);
- __ vcvt_f64_s32(quotient, quotient.low());
-
- // Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
-
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand(0));
- DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
- }
-
- __ bind(&done);
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
- class DeferredDivI: public LDeferredCode {
- public:
- DeferredDivI(LCodeGen* codegen, LDivI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LDivI* instr_;
- };
-
- const Register left = ToRegister(instr->InputAt(0));
- const Register right = ToRegister(instr->InputAt(1));
- const Register scratch = scratch0();
- const Register result = ToRegister(instr->result());
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
- DeoptimizeIf(eq, instr->environment());
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive;
+ if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Do the test only if it hasn't been done above.
+ __ cmp(divisor, Operand::Zero());
+ }
+ __ b(pl, &positive);
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr);
+ __ bind(&positive);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ // We don't need to check for overflow when truncating with sdiv
+ // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+ __ cmp(dividend, Operand(kMinInt));
+ __ cmp(divisor, Operand(-1), eq);
+ DeoptimizeIf(eq, instr);
+ }
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(result, dividend, divisor);
+ } else {
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
+ __ vmov(double_scratch0().low(), dividend);
+ __ vcvt_f64_s32(vleft, double_scratch0().low());
+ __ vmov(double_scratch0().low(), divisor);
+ __ vcvt_f64_s32(vright, double_scratch0().low());
+ __ vdiv(vleft, vleft, vright); // vleft now contains the result.
+ __ vcvt_s32_f64(double_scratch0().low(), vleft);
+ __ vmov(result, double_scratch0().low());
+ }
+
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = scratch0();
+ __ Mls(remainder, result, divisor, dividend);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr);
+ }
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DwVfpRegister addend = ToDoubleRegister(instr->addend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ DCHECK(addend.is(ToDoubleRegister(instr->result())));
+
+ __ vmla(addend, multiplier, multiplicand);
+}
+
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+ DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ DCHECK(minuend.is(ToDoubleRegister(instr->result())));
+
+ __ vmls(minuend, multiplier, multiplicand);
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Move(result, dividend);
+ return;
+ }
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ cmp(left, Operand(0));
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand(0));
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
+ DeoptimizeIf(eq, instr);
}
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(vs, instr);
+ }
+ return;
}
- Label done, deoptimize;
- // Test for a few common cases first.
- __ cmp(right, Operand(1));
- __ mov(result, left, LeaveCC, eq);
- __ b(eq, &done);
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ mov(result, Operand(result, ASR, shift));
+ return;
+ }
- __ cmp(right, Operand(2));
- __ tst(left, Operand(1), eq);
- __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
- __ b(eq, &done);
+ __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+ __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
+}
- __ cmp(right, Operand(4));
- __ tst(left, Operand(3), eq);
- __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
- __ b(eq, &done);
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new DeferredDivI(this, instr);
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr);
+ return;
+ }
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr);
+ }
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
- __ b(&done);
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ return;
+ }
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ DCHECK(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ sub(result, result, Operand(1));
__ bind(&done);
}
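Truncating division rounds the wrong way exactly when the operands' signs differ and the division is inexact; pre-biasing the dividend by one toward the divisor's sign and subtracting one afterwards yields the floor. A sketch, assuming divisor != 0 and no kMinInt edge cases (which the surrounding deopts rule out):

    #include <cstdint>

    int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
      bool needs_adjustment =
          (divisor > 0) ? (dividend < 0) : (dividend > 0);
      if (!needs_adjustment) return dividend / divisor;  // floor == trunc
      int32_t temp = dividend + ((divisor > 0) ? 1 : -1);
      return temp / divisor - 1;
    }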
-template<int T>
-void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register left = ToRegister(instr->dividend());
+ Register right = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
- // Move left to r1 and right to r0 for the stub call.
- if (left.is(r1)) {
- __ Move(r0, right);
- } else if (left.is(r0) && right.is(r1)) {
- __ Swap(r0, r1, r2);
- } else if (left.is(r0)) {
- ASSERT(!right.is(r1));
- __ mov(r1, r0);
- __ mov(r0, right);
- } else {
- ASSERT(!left.is(r0) && !right.is(r0));
- __ mov(r0, right);
- __ mov(r1, left);
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right, Operand::Zero());
+ DeoptimizeIf(eq, instr);
}
- BinaryOpStub stub(op, OVERWRITE_LEFT);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
- 0,
- Safepoint::kNoLazyDeopt);
- // Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive;
+ if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Do the test only if it hasn't been done above.
+ __ cmp(right, Operand::Zero());
+ }
+ __ b(pl, &positive);
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(eq, instr);
+ __ bind(&positive);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ // We don't need to check for overflow when truncating with sdiv
+ // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+ __ cmp(left, Operand(kMinInt));
+ __ cmp(right, Operand(-1), eq);
+ DeoptimizeIf(eq, instr);
+ }
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(result, left, right);
+ } else {
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
+ __ vmov(double_scratch0().low(), left);
+ __ vcvt_f64_s32(vleft, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right);
+ __ vcvt_f64_s32(vright, double_scratch0().low());
+ __ vdiv(vleft, vleft, vright); // vleft now contains the result.
+ __ vcvt_s32_f64(double_scratch0().low(), vleft);
+ __ vmov(result, double_scratch0().low());
+ }
+
+ Label done;
+ Register remainder = scratch0();
+ __ Mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ b(eq, &done);
+ __ eor(remainder, remainder, Operand(right));
+ __ add(result, result, Operand(remainder, ASR, 31));
+ __ bind(&done);
}
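
// Illustrative sketch (not part of this change; helper name is hypothetical)
// of the remainder fix-up above: Mls recovers the remainder, and the
// eor + ASR #31 pair adds -1 to the quotient exactly when the remainder is
// non-zero and has the opposite sign of the divisor.
static int32_t FlooringDivSketch(int32_t a, int32_t b) {
  int32_t q = a / b;      // Truncating quotient (sdiv, or the VFP fallback).
  int32_t r = a - q * b;  // Remainder, as computed by Mls.
  if (r != 0 && ((r ^ b) < 0)) q -= 1;  // (r ^ b) >> 31 is -1 exactly here.
  return q;
}
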
void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
+ if (right_op->IsConstantOperand()) {
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If the constant is negative and left is zero, the result should be -0.
- __ cmp(left, Operand(0));
- DeoptimizeIf(eq, instr->environment());
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(eq, instr);
}
switch (constant) {
case -1:
- __ rsb(result, left, Operand(0));
+ if (overflow) {
+ __ rsb(result, left, Operand::Zero(), SetCC);
+ DeoptimizeIf(vs, instr);
+ } else {
+ __ rsb(result, left, Operand::Zero());
+ }
break;
case 0:
if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
- __ cmp(left, Operand(0));
- DeoptimizeIf(mi, instr->environment());
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(mi, instr);
}
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
break;
case 1:
__ Move(result, left);
@@ -1191,23 +1666,21 @@
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- }
-
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand(0));
-
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+          // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
+          // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
} else {
// Generate standard code.
__ mov(ip, Operand(constant));
@@ -1216,27 +1689,36 @@
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->TempAt(0)), left, right);
- }
+ DCHECK(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
+ Register scratch = scratch0();
// scratch:result = left * right.
- __ smull(result, scratch, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ smull(result, scratch, result, right);
+ } else {
+ __ smull(result, scratch, left, right);
+ }
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
- __ mul(result, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mul(result, result, right);
+ } else {
+ __ mul(result, left, right);
+ }
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
- DeoptimizeIf(mi, instr->environment());
+ __ teq(left, Operand(right));
+ __ b(pl, &done);
+ // Bail out if the result is minus zero.
+ __ cmp(result, Operand::Zero());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
}
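
// Notes on the constant-multiply lowering above (illustrative, not part of
// this change): |k| of the form 2^n, 2^n + 1 and 2^n - 1 become LSL,
// add-with-shifted-operand and rsb-with-shifted-operand respectively, with a
// final rsb from zero fixing the sign for negative constants. In the register
// case, 'teq left, right' sets N from left ^ right, so the 'pl' branch skips
// the minus-zero check whenever the operands have the same sign: a product
// can only be -0 when a zero operand meets a negative one.
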
@@ -1244,17 +1726,17 @@
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->InputAt(0);
- LOperand* right_op = instr->InputAt(1);
- ASSERT(left_op->IsRegister());
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
+ DCHECK(left_op->IsRegister());
Register left = ToRegister(left_op);
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
- ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
right = ToOperand(right_op);
}
@@ -1266,7 +1748,11 @@
__ orr(result, left, right);
break;
case Token::BIT_XOR:
- __ eor(result, left, right);
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ mvn(result, Operand(left));
+ } else {
+ __ eor(result, left, right);
+ }
break;
default:
UNREACHABLE();
@@ -1278,21 +1764,24 @@
void LCodeGen::DoShiftI(LShiftI* instr) {
// Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
// result may alias either of them.
- LOperand* right_op = instr->InputAt(1);
- Register left = ToRegister(instr->InputAt(0));
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
if (right_op->IsRegister()) {
// Mask the right_op operand.
__ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
+ case Token::ROR:
+ __ mov(result, Operand(left, ROR, scratch));
+ break;
case Token::SAR:
__ mov(result, Operand(left, ASR, scratch));
break;
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@@ -1309,6 +1798,13 @@
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ mov(result, Operand(left, ROR, shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ mov(result, Operand(left, ASR, shift_count));
@@ -1322,14 +1818,25 @@
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ Move(result, left);
}
break;
case Token::SHL:
if (shift_count != 0) {
- __ mov(result, Operand(left, LSL, shift_count));
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ mov(result, Operand(left, LSL, shift_count - 1));
+ __ SmiTag(result, result, SetCC);
+ } else {
+ __ SmiTag(result, left, SetCC);
+ }
+ DeoptimizeIf(vs, instr);
+ } else {
+ __ mov(result, Operand(left, LSL, shift_count));
+ }
} else {
__ Move(result, left);
}
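
// Notes on the two deopt checks above (illustrative, not part of this
// change): SHR yields a uint32, which is representable in the tagged result
// only if (result & 0x80000000) == 0, hence the 'mi' deopt after the
// flag-setting LSR (or the explicit tst for a zero shift count). For
// smi-tagged SHL the final doubling is emitted as SmiTag with SetCC so that
// the V flag catches a signed overflow of the tagging step.
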
@@ -1343,116 +1850,99 @@
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
__ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
+ }
+}
+
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
+ if (right->IsStackSlot()) {
+ Register right_reg = EmitLoadRegister(right, ip);
+ __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+ } else {
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+ }
+
+ if (can_overflow) {
+ DeoptimizeIf(vs, instr);
}
}
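
// Note (illustrative, not part of this change; helper name is hypothetical):
// ARM's rsb computes operand2 - operand1, so LRSubI yields right - left
// without an extra register move; with an immediate right-hand side this
// gives 'constant - x' in a single instruction.
static int32_t ReverseSubtractSketch(int32_t left, int32_t right) {
  return right - left;  // __ rsb(result, left, right)
}
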
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ mov(ToRegister(instr->result()), Operand(instr->value()));
}
void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
+ DCHECK(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
- __ Vmov(result, v);
+ __ Vmov(result, v, scratch0());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
}
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ mov(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), object);
}
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
-
- // Load map into |result|.
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
- Label done;
-
- // If the object is a smi return the object.
- __ tst(input, Operand(kSmiTagMask));
- __ Move(result, input, eq);
- __ b(eq, &done);
-
- // If the object is not a value type, return the object.
- __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
- __ Move(result, input, ne);
- __ b(ne, &done);
- __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
}
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
- ASSERT(object.is(result));
- ASSERT(object.is(r0));
- ASSERT(!scratch.is(scratch0()));
- ASSERT(!scratch.is(object));
+ DCHECK(object.is(result));
+ DCHECK(object.is(r0));
+ DCHECK(!scratch.is(scratch0()));
+ DCHECK(!scratch.is(object));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ SmiTst(object);
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- __ Assert(eq, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(ne, instr);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1477,49 +1967,174 @@
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- __ mvn(result, Operand(input));
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ Register scratch = scratch0();
+ DCHECK(!scratch.is(string));
+ DCHECK(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, Operand(ToRegister(index)));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
}
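
// Illustrative sketch (not part of this change; helper name is hypothetical)
// of the offset computed above: characters start right after the sequential
// string header, one byte apiece for one-byte strings and kUC16Size (2) bytes
// for two-byte strings, which is why the register case scales the index with
// LSL #1.
static int SeqStringByteOffsetSketch(int index, bool two_byte, int header) {
  return header + index * (two_byte ? 2 : 1);
}
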
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ push(input_reg);
- CallRuntime(Runtime::kThrow, 1, instr);
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
if (FLAG_debug_code) {
- __ stop("Unreachable code.");
+ Register scratch = scratch0();
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ and_(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ ldrb(result, operand);
+ } else {
+ __ ldrh(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ strb(value, operand);
+ } else {
+ __ strh(value, operand);
}
}
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
__ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
+ }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ Register left_reg = ToRegister(left);
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
+ ? ToOperand(right)
+ : Operand(EmitLoadRegister(right, ip));
+ Register result_reg = ToRegister(instr->result());
+ __ cmp(left_reg, right_op);
+ __ Move(result_reg, left_reg, condition);
+ __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
+ } else {
+ DCHECK(instr->hydrogen()->representation().IsDouble());
+ DwVfpRegister left_reg = ToDoubleRegister(left);
+ DwVfpRegister right_reg = ToDoubleRegister(right);
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ Label result_is_nan, return_left, return_right, check_zero, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ if (operation == HMathMinMax::kMathMin) {
+ __ b(mi, &return_left);
+ __ b(gt, &return_right);
+ } else {
+ __ b(mi, &return_right);
+ __ b(gt, &return_left);
+ }
+ __ b(vs, &result_is_nan);
+ // Left equals right => check for -0.
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+ __ b(ne, &done); // left == right != 0.
+ } else {
+ __ b(ne, &return_left); // left == right != 0.
+ }
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ // We could use a single 'vorr' instruction here if we had NEON support.
+ __ vneg(left_reg, left_reg);
+ __ vsub(result_reg, left_reg, right_reg);
+ __ vneg(result_reg, result_reg);
+ } else {
+ // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+ // the decision for vadd is easy because vand is a NEON instruction.
+ __ vadd(result_reg, left_reg, right_reg);
+ }
+ __ b(&done);
+
+ __ bind(&result_is_nan);
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+
+ __ bind(&return_right);
+ __ Move(result_reg, right_reg);
+ if (!left_reg.is(result_reg)) {
+ __ b(&done);
+ }
+
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+
+ __ bind(&done);
}
}
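
// Illustrative sketch (not part of this change; helper name is hypothetical)
// of the signed-zero handling above. IEEE-754 addition gives
// (+0) + (-0) == +0, which is exactly max(+0, -0); for min the result must be
// -0 whenever either input is, so the vneg/vsub/vneg sequence computes
// -((-a) - b).
static double MinOfSignedZerosSketch(double a, double b) {
  // Assumes both inputs are zeros; the code has already branched otherwise.
  return -((-a) - b);  // -0 unless both inputs are +0.
}
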
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
- DoubleRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister left = ToDoubleRegister(instr->left());
+ DwVfpRegister right = ToDoubleRegister(instr->right());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
__ vadd(result, left, right);
@@ -1534,19 +2149,13 @@
__ vdiv(result, left, right);
break;
case Token::MOD: {
- // Save r0-r3 on the stack.
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
-
__ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
+ __ MovToFloatParameters(left, right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
-
- // Restore r0-r3.
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
+ __ MovFromFloatResult(result);
break;
}
default:
@@ -1557,105 +2166,126 @@
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
- ASSERT(ToRegister(instr->InputAt(1)).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r1));
+ DCHECK(ToRegister(instr->right()).is(r0));
+ DCHECK(ToRegister(instr->result()).is(r0));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  // Block literal pool emission to ensure that the nop indicating no inlined
+  // smi code is in the correct position.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
+ if (right_block == left_block || condition == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
- __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
+ __ b(condition, chunk_->GetAssemblyLabel(left_block));
} else {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
+ __ b(condition, chunk_->GetAssemblyLabel(left_block));
__ b(chunk_->GetAssemblyLabel(right_block));
}
}
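
// Illustrative sketch (not part of this change; names are hypothetical) of
// the branch selection above: the successor emitted next needs no branch at
// all, so at most one conditional branch is emitted, with the condition
// inverted when the true block is the fall-through.
enum BranchPlanSketch { kGoto, kInvertedBranch, kBranch, kBranchPair };
static BranchPlanSketch SelectBranchPlan(int t, int f, int next) {
  if (t == f) return kGoto;                // EmitGoto; no-op if t comes next.
  if (t == next) return kInvertedBranch;   // b(!cond, false_label)
  if (f == next) return kBranch;           // b(cond, true_label)
  return kBranchPair;                      // b(cond, true); b(false)
}
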
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ b(condition, chunk_->GetAssemblyLabel(false_block));
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ stop("LBreak");
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, ne);
+ if (r.IsInteger32() || r.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ cmp(reg, Operand::Zero());
+ EmitBranch(instr, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
+ DCHECK(!info()->IsStub());
+ DwVfpRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- EmitBranch(true_block, false_block, eq);
+ __ VFPCompareAndSetFlags(reg, 0.0);
+ __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
+ EmitBranch(instr, ne);
} else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ DCHECK(r.IsTagged());
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ DCHECK(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
} else if (type.IsSmi()) {
- __ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, ne);
+ DCHECK(!info()->IsStub());
+ __ cmp(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (type.IsJSArray()) {
+ DCHECK(!info()->IsStub());
+ EmitBranch(instr, al);
+ } else if (type.IsHeapNumber()) {
+ DCHECK(!info()->IsStub());
+ DwVfpRegister dbl_scratch = double_scratch0();
+ __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
+ EmitBranch(instr, ne);
+ } else if (type.IsString()) {
+ DCHECK(!info()->IsStub());
+ __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ cmp(ip, Operand::Zero());
+ EmitBranch(instr, ne);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
+ __ b(eq, instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
- __ cmp(reg, Operand(0));
- __ b(eq, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ cmp(reg, Operand::Zero());
+ __ b(eq, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
- __ tst(reg, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ __ SmiTst(reg);
+ DeoptimizeIf(eq, instr);
}
const Register map = scratch0();
@@ -1666,14 +2296,14 @@
// Undetectable -> false.
__ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
+ __ b(ne, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, true_label);
+ __ b(ge, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1682,38 +2312,45 @@
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
      __ b(ge, &not_string);
__ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand(0));
- __ b(ne, true_label);
- __ b(false_label);
+ __ cmp(ip, Operand::Zero());
+ __ b(ne, instr->TrueLabel(chunk_));
+ __ b(instr->FalseLabel(chunk_));
      __ bind(&not_string);
}
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+ __ b(eq, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
      __ b(ne, &not_heap_number);
__ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(dbl_scratch, 0.0);
- __ b(vs, false_label); // NaN -> false.
- __ b(eq, false_label); // +0, -0 -> false.
- __ b(true_label);
+ __ cmp(r0, r0, vs); // NaN -> false.
+ __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
+ __ b(instr->TrueLabel(chunk_));
      __ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr);
+ }
}
}
}
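
// Summary (illustrative, not part of this change): the type-specialized
// cases above implement the standard JavaScript truthiness table, restricted
// to the types the ToBoolean IC has already observed:
//   undefined, null, false, +0, -0, NaN, "" and undetectables -> false
//   every other value, including all ordinary objects         -> true
// with a deopt when a value outside the observed set shows up.
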
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
@@ -1730,6 +2367,10 @@
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -1751,20 +2392,20 @@
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cond = TokenToCondition(instr->op(), false);
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -1773,85 +2414,86 @@
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ __ b(vs, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left),
- Operand(ToInteger32(LConstantOperand::cast(right))));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
+ } else {
+ __ cmp(ToRegister(left), Operand(value));
+ }
} else if (left->IsConstantOperand()) {
- __ cmp(ToRegister(right),
- Operand(ToInteger32(LConstantOperand::cast(left))));
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
+ } else {
+ __ cmp(ToRegister(right), Operand(value));
+ }
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
} else {
__ cmp(ToRegister(left), ToRegister(right));
}
}
- EmitBranch(true_block, false_block, cond);
+ EmitBranch(instr, cond);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
__ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, Operand(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->InputAt(0));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ mov(ip, Operand(factory()->the_hole_value()));
+ __ cmp(input_reg, ip);
+ EmitBranch(instr, eq);
return;
}
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(ip, nil_value);
- __ cmp(reg, ip);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq);
+ DwVfpRegister input_reg = ToDoubleRegister(instr->object());
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
+ EmitFalseBranch(instr, vc);
+
+ Register scratch = scratch0();
+ __ VmovHigh(scratch, input_reg);
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ EmitBranch(instr, eq);
+}
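
// Illustrative sketch (not part of this change; helper name is hypothetical)
// of the hole test above: the hole is a single reserved NaN bit pattern, so
// after the unordered (NaN) check it suffices to compare the upper word of
// the double, which is what VmovHigh extracts.
static bool IsHoleNanSketch(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);  // Needs <cstring> and <stdint.h>.
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}
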
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ DCHECK(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ VFPCompareAndSetFlags(value, 0.0);
+ EmitFalseBranch(instr, ne);
+ __ VmovHigh(scratch, value);
+ __ cmp(scratch, Operand(0x80000000));
} else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ b(eq, true_label);
- __ LoadRoot(ip, other_nil_value);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value,
+ scratch,
+ Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()),
+ DO_SMI_CHECK);
+ __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ __ cmp(scratch, Operand(0x80000000));
+ __ cmp(ip, Operand(0x00000000), eq);
}
+ EmitBranch(instr, eq);
}
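
// Illustrative sketch (not part of this change; helper name is hypothetical):
// -0.0 is the unique double whose upper word is 0x80000000 and whose lower
// word is zero, which is exactly what both paths above test (VmovHigh for the
// unboxed case, the exponent/mantissa loads for heap numbers).
static bool IsMinusZeroSketch(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);  // Needs <cstring> and <stdint.h>.
  return bits == (static_cast<uint64_t>(1) << 63);
}
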
@@ -1883,25 +2525,24 @@
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
Condition true_cond =
- EmitIsObject(reg, temp1, false_label, true_label);
+ EmitIsObject(reg, temp1,
+ instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
__ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
return lt;
@@ -1909,42 +2550,37 @@
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
- EmitIsString(reg, temp1, false_label);
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- EmitBranch(true_block, false_block, eq);
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
+ __ SmiTst(input_reg);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ tst(temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
+ EmitBranch(instr, ne);
}
@@ -1969,17 +2605,17 @@
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+  // This instruction also signals that no smi code was inlined.
+ __ cmp(r0, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -1987,7 +2623,7 @@
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
+ DCHECK(from == to || to == LAST_TYPE);
return from;
}
@@ -2005,27 +2641,22 @@
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AssertString(input);
__ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2034,16 +2665,13 @@
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -2055,13 +2683,13 @@
Register input,
Register temp,
Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
+ DCHECK(!input.is(temp));
+ DCHECK(!input.is(temp2));
+ DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2092,7 +2720,7 @@
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
@@ -2103,87 +2731,82 @@
__ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ ldr(temp, FieldMemOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
__ cmp(temp, Operand(class_name));
// End with the answer in flags.
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register temp = scratch0();
- Register temp2 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
__ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(temp, Operand(instr->map()));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
+ DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
__ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
+ &load_bool_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
+ Label* load_bool() { return &load_bool_; }
+
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
+ Label load_bool_;
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -2193,20 +2816,25 @@
Label cache_miss;
Register map = temp;
__ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ {
+ // Block constant pool emission to ensure the positions of instructions are
+ // as expected by the patcher. See InstanceofStub::Generate().
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
+ __ cmp(map, Operand(ip));
+ __ b(ne, &cache_miss);
+ __ bind(deferred->load_bool()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ mov(result, Operand(factory()->the_hole_value()));
+ }
__ b(&done);
// The inlined call site cache did not match. Check null and string before
@@ -2235,10 +2863,8 @@
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
+ Label* map_check,
+ Label* bool_load) {
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
@@ -2246,42 +2872,62 @@
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(temp.is(r4));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 4;
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
- __ StoreToSafepointRegisterSlot(temp, temp);
+ __ Move(InstanceofStub::right(), instr->function());
+
+ int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
+ int additional_delta = (call_size / Assembler::kInstrSize) + 4;
+  // Make sure that the code size is predictable, since we use specific
+  // constant offsets in the code to find embedded values.
+ PredictableCodeSizeScope predictable(
+ masm_, (additional_delta + 1) * Assembler::kInstrSize);
+ // Make sure we don't emit any additional entries in the constant pool before
+  // the call to ensure that CallCodeSize() calculated the correct number of
+ // instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ int map_check_delta =
+ masm_->InstructionsGeneratedSince(map_check) + additional_delta;
+ int bool_load_delta =
+ masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ BlockConstPoolFor(additional_delta);
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(map_check_delta * kPointerSize));
+ // r6 is used to communicate the offset to the location of the bool load.
+ __ mov(r6, Operand(bool_load_delta * kPointerSize));
+    // Each mov above can generate one or two instructions. The deltas were
+    // computed assuming two instructions per mov, so pad here in case either
+    // mov took only one.
+ while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
+ __ nop();
+ }
+ }
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasDeoptimizationEnvironment());
- LEnvironment* env = instr->deoptimization_environment();
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
+ // Put the result value (r0) into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
}
void LCodeGen::DoCmpT(LCmpT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+  // This instruction also signals that no smi code was inlined.
+ __ cmp(r0, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
__ LoadRoot(ToRegister(instr->result()),
@@ -2294,41 +2940,83 @@
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
+ // Runtime::TraceExit returns its parameter in r0. We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(r0);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ }
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+      // The argument count parameter is a smi.
+ __ SmiUntag(reg);
+ __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
+ }
+
+ __ Jump(lr);
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+ }
}
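
// Note (illustrative, not part of this change): with a constant parameter
// count the epilogue pops the receiver plus all parameters in one step,
//   sp_delta = (parameter_count + 1) * kPointerSize,
// skipping the add entirely when that delta is zero; with a dynamic count the
// smi is untagged and scaled by LSL #kPointerSizeLog2 instead.
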
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Move(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
- __ mov(r2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r0));
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+ }
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2337,7 +3025,7 @@
Register cell = scratch0();
// Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
+ __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2345,30 +3033,18 @@
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload (CompareRoot might clobber ip).
- Register payload = ToRegister(instr->TempAt(0));
- __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ Register payload = ToRegister(instr->temp());
+ __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Store the value.
- __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2377,7 +3053,7 @@
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@@ -2398,7 +3074,7 @@
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &skip_assignment);
}
@@ -2406,14 +3082,14 @@
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
@@ -2424,95 +3100,45 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name) {
- LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- int map_count = instr->hydrogen()->types()->length();
- Handle<String> name = instr->hydrogen()->name();
- if (map_count == 0) {
- ASSERT(instr->hydrogen()->need_generic());
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- Label done;
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count - 1; ++i) {
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label next;
- __ cmp(scratch, Operand(map));
- __ b(ne, &next);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- __ b(&done);
- __ bind(&next);
- }
- Handle<Map> map = instr->hydrogen()->types()->last();
- __ cmp(scratch, Operand(map));
- if (instr->hydrogen()->need_generic()) {
- Label generic;
- __ b(ne, &generic);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- __ b(&done);
- __ bind(&generic);
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- DeoptimizeIf(ne, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- }
- __ bind(&done);
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ __ Load(result, operand, access.representation());
+ return;
}
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ Load(result, operand, access.representation());
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r0));
// Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2521,17 +3147,6 @@
Register function = ToRegister(instr->function());
Register result = ToRegister(instr->result());
- // Check that the function really is a function. Load map into the
- // result register.
- __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &non_instance);
-
// Get the prototype or initial map from the function.
__ ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2539,7 +3154,7 @@
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
// If the function does not have an initial map, we're done.
Label done;
@@ -2548,137 +3163,56 @@
// Get the prototype from the initial map.
__ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- // |scratch| still contains |input|'s map.
- __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ ubfx(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(FAST_ELEMENTS));
- __ b(eq, &done);
- __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(le, &done);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->InputAt(0));
- __ ldr(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
+ __ LoadRoot(result, instr->index());
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
-
- // Bailout index is not a valid argument index. Use unsigned check to get
- // negative check for free.
- __ sub(length, length, index, SetCC);
- DeoptimizeIf(ls, instr->environment());
-
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them; add one more.
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = EmitLoadRegister(instr->key(), scratch0());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Load the result.
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ if (instr->length()->IsConstantOperand()) {
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ ldr(result, MemOperand(arguments, index * kPointerSize));
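+ // e.g. const_length == 1 and const_index == 0 give index == 2: the load
+ // skips the two intervening words and reads the sole argument.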
+ } else {
+ Register index = ToRegister(instr->index());
+ __ rsb(result, index, Operand(const_length + 1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
}
- } else {
- key = ToRegister(instr->key());
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ sub(result, length, Operand(loc));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ } else {
+ __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ }
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ __ sub(result, length, index);
+ __ add(result, result, Operand(1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
}
-
- Operand operand = key_is_constant
- ? Operand(constant_key * (1 << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(elements, elements, operand);
- if (!key_is_constant) {
- __ add(elements, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
-
- __ vldr(result, elements, 0);
}
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -2686,64 +3220,81 @@
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
- ? Operand(constant_key * (1 << shift_size))
+ ? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), 0);
- __ vcvt_f64_f32(result, result.low());
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ vldr(double_scratch0().low(), scratch0(), base_offset);
+ __ vcvt_f64_f32(result, double_scratch0().low());
} else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS
- __ vldr(result, scratch0(), 0);
+ __ vldr(result, scratch0(), base_offset);
}
} else {
Register result = ToRegister(instr->result());
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer, constant_key * (1 << shift_size))
- : MemOperand(external_pointer, key, LSL, shift_size));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ ldr(result, mem_operand);
- __ cmp(result, Operand(0x80000000));
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(cs, instr->environment());
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ cmp(result, Operand(0x80000000));
+ DeoptimizeIf(cs, instr);
+ }
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2751,12 +3302,134 @@
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->key()).is(r0));
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+ int base_offset = instr->base_offset();
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ base_offset += constant_key * kDoubleSize;
+ }
+ __ add(scratch, elements, Operand(base_offset));
+
+ if (!key_is_constant) {
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, scratch, Operand(key, LSL, shift_size));
+ }
+
+ __ vldr(result, scratch, 0);
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
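+ // The hole is a NaN with a distinguished upper word, so comparing the
+ // upper 32 bits against kHoleNanUpper32 suffices.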
+ __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr);
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = instr->base_offset();
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ } else {
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
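+ // A smi key already carries a one-bit left shift, so it needs one shift
+ // fewer than an untagged key to become a byte offset.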
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ }
+ __ ldr(result, MemOperand(store_base, offset));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ SmiTst(result);
+ DeoptimizeIf(ne, instr);
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_typed_elements()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int base_offset) {
+ if (key_is_constant) {
+ return MemOperand(base, (constant_key << element_size) + base_offset);
+ }
+
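+ // shift_size is -1 when the key is a Smi and the elements are byte-sized:
+ // a right shift by one untags the key and scales it in a single operand.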
+ if (base_offset == 0) {
+ if (shift_size >= 0) {
+ return MemOperand(base, key, LSL, shift_size);
+ } else {
+ DCHECK_EQ(-1, shift_size);
+ return MemOperand(base, key, LSR, 1);
+ }
+ }
+
+ if (shift_size >= 0) {
+ __ add(scratch0(), base, Operand(key, LSL, shift_size));
+ return MemOperand(scratch0(), base_offset);
+ } else {
+ DCHECK_EQ(-1, shift_size);
+ __ add(scratch0(), base, Operand(key, ASR, 1));
+ return MemOperand(scratch0(), base_offset);
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+ if (FLAG_vector_ics) {
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+ }
+
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2764,21 +3437,25 @@
Register scratch = scratch0();
Register result = ToRegister(instr->result());
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
- __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ if (instr->hydrogen()->from_inlined()) {
+ __ sub(result, sp, Operand(2 * kPointerSize));
+ } else {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ mov(result, fp, LeaveCC, ne);
- __ mov(result, scratch, LeaveCC, eq);
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ mov(result, fp, LeaveCC, ne);
+ __ mov(result, scratch, LeaveCC, eq);
+ }
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->InputAt(0));
+ Register elem = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Label done;
@@ -2802,26 +3479,29 @@
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label global_object, result_in_receiver;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(scratch,
- Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
- __ b(ne, &receiver_ok);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver into an object for strict mode
+ // functions.
+ __ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ __ tst(scratch, Operand(mask));
+ __ b(ne, &result_in_receiver);
- // Do not transform the receiver to object for builtins.
- __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &receiver_ok);
+ // Do not transform the receiver into an object for builtins.
+ __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &result_in_receiver);
+ }
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -2832,17 +3512,27 @@
__ b(eq, &global_object);
// Deoptimize if the receiver is not a JS object.
- __ tst(receiver, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ __ SmiTst(receiver);
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr->environment());
- __ jmp(&receiver_ok);
+ DeoptimizeIf(lt, instr);
+ __ b(&result_in_receiver);
__ bind(&global_object);
- __ ldr(receiver, GlobalObjectOperand());
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
+ __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ ldr(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
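+ // The global proxy, not the global object itself, is what sloppy-mode
+ // callees observe as their receiver.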
+
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ b(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mov(result, receiver);
+ __ bind(&result_ok);
+ }
}
@@ -2852,15 +3542,15 @@
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
- ASSERT(receiver.is(r0)); // Used for parameter count.
- ASSERT(function.is(r1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(receiver.is(r0)); // Used for parameter count.
+ DCHECK(function.is(r1)); // Required by InvokeFunction.
+ DCHECK(ToRegister(instr->result()).is(r0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -2873,7 +3563,7 @@
// stack.
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
__ b(eq, &invoke);
__ bind(&loop);
__ ldr(scratch, MemOperand(elements, length, LSL, 2));
@@ -2882,24 +3572,21 @@
__ b(ne, &loop);
__ bind(&invoke);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver, which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
} else {
Register argument_reg = EmitLoadRegister(argument, ip);
__ push(argument_reg);
@@ -2907,29 +3594,33 @@
}
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ if (info()->IsOptimizing()) {
+ __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ DCHECK(result.is(cp));
+ }
}
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
@@ -2937,48 +3628,33 @@
}
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
+ R1State r1_state) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
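+ // Invoking directly bypasses the arguments adaptor, so it is only done
+ // when the callee never adapts or the arity already matches.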
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
- __ LoadHeapObject(r1, function);
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ if (r1_state == R1_UNINITIALIZED) {
+ __ Move(r1, function);
}
+ // Change context.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
+ if (dont_adapt_arguments) {
__ mov(r0, Operand(arity));
}
// Invoke function.
- __ SetCallKind(r5, call_kind);
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
@@ -2987,25 +3663,16 @@
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
-
- // Restore context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ DCHECK(instr->context() != NULL);
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3013,7 +3680,7 @@
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
Label done;
Register exponent = scratch0();
@@ -3029,7 +3696,7 @@
// Input is negative. Reverse its sign.
// Preserve the value of all registers.
{
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// Registers were saved at the safepoint, so we can use
// many scratch registers.
@@ -3048,7 +3715,8 @@
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
@@ -3070,47 +3738,46 @@
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ cmp(input, Operand(0));
+ __ cmp(input, Operand::Zero());
__ Move(result, input, pl);
// We can make rsb conditional because the previous cmp instruction
// will clear the V (overflow) flag and rsb won't set this flag
// if input is positive.
- __ rsb(result, input, Operand(0), SetCC, mi);
+ __ rsb(result, input, Operand::Zero(), SetCC, mi);
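+ // Only kMinInt overflows the negation, so the V flag alone decides.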
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
+ DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vabs(result, input);
- } else if (r.IsInteger32()) {
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->InputAt(0));
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
@@ -3120,118 +3787,91 @@
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register input_high = scratch0();
+ Label done, exact;
- __ EmitVFPTruncate(kRoundToMinusInf,
- single_scratch,
- input,
- scratch1,
- scratch2);
- DeoptimizeIf(ne, instr->environment());
+ __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
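+ // Falling through means the floor is not representable as an int32.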
+ DeoptimizeIf(al, instr);
- // Move the result back to general purpose register r0.
- __ vmov(result, single_scratch);
-
+ __ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
- Label done;
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ vmov(result, input.high());
- __ ubfx(scratch,
- result,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, Operand(0), LeaveCC, le);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(le, &check_sign_on_zero);
- } else {
- __ b(le, &done);
- }
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
- DeoptimizeIf(ge, instr->environment());
-
- // Save the original sign for later comparison.
- __ and_(scratch, result, Operand(HeapNumber::kSignMask));
-
- __ Vmov(double_scratch0(), 0.5);
- __ vadd(double_scratch0(), input, double_scratch0());
-
- // Check sign of the result: if the sign changed, the input
- // value was in ]0.5, 0[ and the result should be -0.
- __ vmov(result, double_scratch0().high());
- __ eor(result, result, Operand(scratch), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand(0), LeaveCC, mi);
- __ b(mi, &done);
- }
-
- __ EmitVFPTruncate(kRoundToMinusInf,
- double_scratch0().low(),
- double_scratch0(),
- result,
- scratch);
- DeoptimizeIf(ne, instr->environment());
- __ vmov(result, double_scratch0().low());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ bind(&check_sign_on_zero);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ __ cmp(input_high, Operand::Zero());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
+ DwVfpRegister input_plus_dot_five = double_scratch1;
+ Register input_high = scratch0();
+ DwVfpRegister dot_five = double_scratch0();
+ Label convert, done;
+
+ __ Vmov(dot_five, 0.5, scratch0());
+ __ vabs(double_scratch1, input);
+ __ VFPCompareAndSetFlags(double_scratch1, dot_five);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ b(hi, &convert); // Out of [-0.5, +0.5].
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ VmovHigh(input_high, input);
+ __ cmp(input_high, Operand::Zero());
+ DeoptimizeIf(mi, instr); // [-0.5, -0].
+ }
+ __ VFPCompareAndSetFlags(input, dot_five);
+ __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero.
+ __ mov(result, Operand::Zero(), LeaveCC, ne);
+ __ b(&done);
+
+ __ bind(&convert);
+ __ vadd(input_plus_dot_five, input, dot_five);
+ // Reuse dot_five (double_scratch0) as we no longer need this value.
+ __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
+ &done, &done);
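+ // Both outcome labels alias &done, so reaching the next instruction
+ // means the rounded value does not fit in an int32.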
+ DeoptimizeIf(al, instr);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+ DwVfpRegister output_reg = ToDoubleRegister(instr->result());
+ LowDwVfpRegister scratch = double_scratch0();
+ __ vcvt_f32_f64(scratch.low(), input_reg);
+ __ vcvt_f64_f32(output_reg, scratch.low());
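+ // Rounding through float32 and back is exactly Math.fround: the result
+ // is the nearest single-precision value, widened again to double.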
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister temp = double_scratch0();
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done;
- __ vmov(temp, -V8_INFINITY);
+ __ vmov(temp, -V8_INFINITY, scratch0());
__ VFPCompareAndSetFlags(input, temp);
__ vneg(result, temp, eq);
__ b(&done, eq);
@@ -3247,296 +3887,330 @@
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(d2));
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(r2));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
- ASSERT(ToDoubleRegister(instr->result()).is(d3));
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d1));
+ DCHECK(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(ToDoubleRegister(instr->left()).is(d0));
+ DCHECK(ToDoubleRegister(instr->result()).is(d2));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(r2, &no_deopt);
- __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!r6.is(tagged_exponent));
+ __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ __ cmp(r6, Operand(ip));
+ DeoptimizeIf(ne, instr);
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ DCHECK(exponent_type.IsDouble());
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
-void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DwVfpRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(d7));
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
-
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the global context's random seeds
-
- // Load state[0].
- __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
- __ cmp(r1, Operand(0));
- __ b(eq, deferred->entry());
- // Load state[1].
- __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
- // r1: state[0].
- // r0: state[1].
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ and_(r3, r1, Operand(0xFFFF));
- __ mov(r4, Operand(18273));
- __ mul(r3, r3, r4);
- __ add(r1, r3, Operand(r1, LSR, 16));
- // Save state[0].
- __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(r3, r0, Operand(0xFFFF));
- __ mov(r4, Operand(36969));
- __ mul(r3, r3, r4);
- __ add(r0, r3, Operand(r0, LSR, 16));
- // Save state[1].
- __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ and_(r0, r0, Operand(0x3FFFF));
- __ add(r0, r0, Operand(r1, LSL, 14));
-
- __ bind(deferred->exit());
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
}
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in r0.
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ clz(result, input);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(instr->HasPointerMap());
- ASSERT(instr->HasDeoptimizationEnvironment());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r1));
+ DCHECK(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ R1_CONTAINS_TARGET);
+ }
+}
+
+
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+
+ Register scratch = r3;
+ Register extra = r4;
+ Register extra2 = r5;
+ Register extra3 = r6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
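+ // The probe tail-calls the handler, so any frame built for this code
+ // must be torn down before the jump; hence the eager-frame check.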
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ DCHECK(ToRegister(instr->result()).is(r0));
+
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ PlatformInterfaceDescriptor* call_descriptor =
+ instr->descriptor().platform_specific_descriptor();
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+ call_descriptor->storage_mode());
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ // Make sure we don't emit any additional entries in the constant pool
+ // before the call to ensure that the CallCodeSize() calculated the correct
+ // number of instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+ __ Call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ DCHECK(ToRegister(instr->function()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(r0, Operand(instr->arity()));
+ }
+ // Change context.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
+ // Load the code entry address.
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ mov(r0, Operand(instr->arity()));
+ // No cell in r2 for construct type feedback in optimized code.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
+
+ __ mov(r0, Operand(instr->arity()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // A packed array constructed with a nonzero length will contain holes,
+ // so inspect the single (length) argument to choose the right stub.
+ __ ldr(r5, MemOperand(sp, 0));
+ __ cmp(r5, Operand::Zero());
+ __ b(eq, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+}
+
+
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
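+ // Store the untagged entry address (just past the Code header) into the
+ // function's code entry field.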
+ __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
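+ // result = base + offset, where the offset is either a constant or a register.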
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ add(result, base, Operand(ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ add(result, base, offset);
+ }
+}
+
+
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
Register scratch = scratch0();
- int offset = instr->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
- ASSERT(!object.is(value));
+ if (access.IsExternalMemory()) {
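+ // External memory is off-heap: store through the raw pointer, no write barrier.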
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ __ Store(value, operand, representation);
+ return;
+ }
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
+ __ AssertNotSmi(object);
+
+ DCHECK(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DCHECK(!instr->hydrogen()->has_transition());
+ DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ vstr(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
+ __ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object,
+ scratch,
+ temp,
+ GetLinkRegisterState(),
+ kSaveFPRegs);
+ }
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ str(value, FieldMemOperand(object, offset));
+ Register value = ToRegister(instr->value());
+ if (access.IsInobject()) {
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
value,
scratch,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ str(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -3544,115 +4218,52 @@
offset,
value,
object,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
}
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
- DeoptimizeIf(hs, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
- __ str(value, FieldMemOperand(elements, offset));
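+ // Deoptimize when the index is out of range: strictly above the length
+ // when index == length is allowed, at or above it otherwise.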
+ Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+ if (instr->index()->IsConstantOperand()) {
+ Operand index = ToOperand(instr->index());
+ Register length = ToRegister(instr->length());
+ __ cmp(length, index);
+ cc = CommuteCondition(cc);
} else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Register index = ToRegister(instr->index());
+ Operand length = ToOperand(instr->length());
+ __ cmp(index, length);
}
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ b(NegateCondition(cc), &done);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
} else {
- key = ToRegister(instr->key());
+ DeoptimizeIf(cc, instr);
}
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- Operand operand = key_is_constant
- ? Operand(constant_key * (1 << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
-
- __ bind(&not_nan);
- __ vstr(value, scratch, 0);
}
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
-
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3660,52 +4271,78 @@
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
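+ // Smi keys carry a one-bit tag, so they are scaled by one bit less.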
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
- : Operand(key, LSL, shift_size));
- __ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), 0);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), 0);
+ __ vstr(double_scratch0().low(), address, base_offset);
+ } else { // Storing doubles, not floats.
+ __ vstr(value, address, base_offset);
}
} else {
Register value(ToRegister(instr->value()));
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer, constant_key * (1 << shift_size))
- : MemOperand(external_pointer, key, LSL, shift_size));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ base_offset);
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
__ strb(value, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ strh(value, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ str(value, mem_operand);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3713,81 +4350,192 @@
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r2));
- ASSERT(ToRegister(instr->key()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DwVfpRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int base_offset = instr->base_offset();
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ __ add(scratch, elements,
+ Operand((constant_key << element_size_shift) + base_offset));
+ } else {
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, elements, Operand(base_offset));
+ __ add(scratch, scratch,
+ Operand(ToRegister(instr->key()), LSL, shift_size));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Force a canonical NaN.
+ if (masm()->emit_debug_code()) {
+ __ vmrs(ip);
+ __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+ __ Assert(ne, kDefaultNaNModeNotSet);
+ }
+ __ VFPCanonicalizeNaN(double_scratch, value);
+ __ vstr(double_scratch, scratch, 0);
+ } else {
+ __ vstr(value, scratch, 0);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = instr->base_offset();
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ } else {
+ // Even though the HStoreKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ }
+ __ str(value, MemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ add(key, store_base, Operand(offset));
+ __ RecordWrite(elements,
+ key,
+ value,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // Dispatch by case: typed/external elements, fast double elements, and
+ // fast tagged elements.
+ if (instr->is_typed_elements()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
- __ mov(new_map_reg, Operand(to_map));
- if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- to_kind == FAST_DOUBLE_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
- ASSERT(fixed_object_reg.is(r2));
- ASSERT(new_map_reg.is(r3));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
- ASSERT(fixed_object_reg.is(r2));
- ASSERT(new_map_reg.is(r3));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
+ __ RecordWriteForMap(object_reg,
+ new_map_reg,
+ scratch,
+ GetLinkRegisterState(),
+ kDontSaveFPRegs);
} else {
- UNREACHABLE();
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(object_reg.is(r0));
+ PushSafepointRegistersScope scope(this);
+ __ Move(r1, to_map);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ bind(&not_applicable);
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(eq, instr);
+ __ bind(&no_memento_found);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r1));
+ DCHECK(ToRegister(instr->right()).is(r0));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@@ -3806,9 +4554,9 @@
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
@@ -3821,35 +4569,36 @@
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(r0);
- }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+ instr->context());
+ __ AssertSmi(r0);
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
+ DCHECK(!char_code.is(result));
- __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
+ __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
__ b(hi, deferred->entry());
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
@@ -3868,28 +4617,21 @@
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
+ DCHECK(output->IsDoubleRegister());
SwVfpRegister single_scratch = double_scratch0().low();
if (input->IsStackSlot()) {
Register scratch = scratch0();
@@ -3902,101 +4644,169 @@
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ __ vmov(flt_scratch, ToRegister(input));
+ __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->value());
Register dst = ToRegister(instr->result());
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
- Label slow;
- Register src = ToRegister(instr->InputAt(0));
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
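+ // Values above Smi::kMaxValue cannot be tagged; take the deferred
+ // heap-number path.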
+ __ cmp(input, Operand(Smi::kMaxValue));
+ __ b(hi, deferred->entry());
+ __ SmiTag(result, input);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DoubleRegister dbl_scratch = double_scratch0();
- SwVfpRegister flt_scratch = dbl_scratch.low();
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
+ LowDwVfpRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- Label done;
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ eor(src, src, Operand(0x80000000));
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
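+ // The arithmetic shift in SmiUntag copied bit 30 into bit 31; flipping
+ // bit 31 recovers the original value.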
+ __ eor(src, src, Operand(0x80000000));
+ }
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
+ } else {
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+
if (FLAG_inline_new) {
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- __ Move(dst, r5);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ b(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, Operand::Zero());
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand(0));
- __ StoreToSafepointRegisterSlot(ip, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Move(dst, r0);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, dst);
+ }
// Done. Store the value from dbl_scratch into the value field of the
// allocated heap number.
__ bind(&done);
- __ sub(ip, dst, Operand(kHeapObjectTag));
- __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ StoreToSafepointRegisterSlot(dst, dst);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ __ add(dst, dst, Operand(kHeapObjectTag));
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first for performance; the value store
+ // below uses it directly.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vstr(input_reg, ip, HeapNumber::kValueOffset);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ // Now that we have finished with the object's real address, tag it.
+ __ add(reg, reg, Operand(kHeapObjectTag));
}
@@ -4005,83 +4815,104 @@
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ mov(reg, Operand(0));
+ __ mov(reg, Operand::Zero());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ PushSafepointRegistersScope scope(this);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
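+ // A uint32 value fits in a smi only if its top two bits are clear.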
+ __ tst(input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr);
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTag(output, input, SetCC);
+ DeoptimizeIf(vs, instr);
+ } else {
+ __ SmiTag(output, input);
+ }
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ DwVfpRegister result_reg,
+ NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
- ASSERT(!result_reg.is(double_scratch0()));
-
- Label load_smi, heap_number, done;
-
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env);
- } else {
- Label heap_number;
- __ b(eq, &heap_number);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
-
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ DCHECK(!result_reg.is(double_scratch0()));
+ Label convert, load_smi, done;
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ // Heap number map check.
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ if (can_convert_undefined_to_nan) {
+ __ b(ne, &convert);
+ } else {
+ DeoptimizeIf(ne, instr);
+ }
+ // Load the heap number's double value.
+ __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
+ if (deoptimize_on_minus_zero) {
+ __ VmovLow(scratch, result_reg);
+ __ cmp(scratch, Operand::Zero());
+ __ b(ne, &done);
+ __ VmovHigh(scratch, result_reg);
+ __ cmp(scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, instr);
+ }
__ jmp(&done);
-
- __ bind(&heap_number);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, instr);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ jmp(&done);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
}
- // Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand(0));
- __ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
- }
- __ jmp(&done);
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4092,14 +4923,14 @@
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister double_scratch = double_scratch0();
- SwVfpRegister single_scratch = double_scratch.low();
+ Register scratch2 = ToRegister(instr->temp());
+ LowDwVfpRegister double_scratch = double_scratch0();
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
- ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+ DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
Label done;
@@ -4107,65 +4938,56 @@
// The carry flag is set when we reach this deferred code as we just executed
// SmiUntag(heap_object, SetCC)
STATIC_ASSERT(kHeapObjectTag == 1);
- __ adc(input_reg, input_reg, Operand(input_reg));
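+ // Re-tag: doubling the value and adding the carry from SmiUntag
+ // reconstructs the original heap object pointer in scratch2.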
+ __ adc(scratch2, input_reg, Operand(input_reg));
// Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- __ mov(input_reg, Operand(0));
+ Label no_heap_number, check_bools, check_false;
+ __ b(ne, &no_heap_number);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
__ b(&done);
- __ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
+ // Check for oddballs. Undefined and False are converted to zero, and True
+ // to one, for truncating conversions.
+ __ bind(&no_heap_number);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ __ b(ne, &check_bools);
+ __ mov(input_reg, Operand::Zero());
+ __ b(&done);
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ bind(&check_bools);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ __ b(ne, &check_false);
+ __ mov(input_reg, Operand(1));
+ __ b(&done);
+ __ bind(&check_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ DeoptimizeIf(ne, instr, "cannot truncate");
+ __ mov(input_reg, Operand::Zero());
} else {
- CpuFeatures::Scope scope(VFP3);
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number");
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
- DeoptimizeIf(ne, instr->environment());
- // Load the result.
- __ vmov(input_reg, single_scratch);
+ __ sub(ip, scratch2, Operand(kHeapObjectTag));
+ __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand(0));
+ __ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
+ __ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "minus zero");
}
}
__ bind(&done);
@@ -4173,101 +4995,127 @@
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ DCHECK(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(input_reg, SetCC);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ b(cs, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
+ DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
+ DwVfpRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
- SwVfpRegister single_scratch = double_scratch0().low();
-
- Label done;
+ DwVfpRegister double_input = ToDoubleRegister(instr->value());
+ LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- VFPRoundingMode rounding_mode = kRoundToMinusInf;
- __ EmitVFPTruncate(rounding_mode,
- single_scratch,
- double_input,
- scratch1,
- scratch2,
- kCheckForInexactConversion);
- // Deoptimize if we had a vfp invalid exception,
- // including inexact operation.
- DeoptimizeIf(ne, instr->environment());
- // Retrieve the result.
- __ vmov(result_reg, single_scratch);
+ __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+ // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ VmovHigh(scratch1, double_input);
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr);
+ __ bind(&done);
+ }
}
- __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DwVfpRegister double_input = ToDoubleRegister(instr->value());
+ LowDwVfpRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+ // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ VmovHigh(scratch1, double_input);
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr);
+ __ bind(&done);
+ }
+ }
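+ // The int32 result may not fit in a smi; deoptimize on tagging overflow.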
+ __ SmiTag(result_reg, SetCC);
+ DeoptimizeIf(vs, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
+ LOperand* input = instr->value();
+ __ SmiTst(ToRegister(input));
+ DeoptimizeIf(ne, instr);
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ SmiTst(ToRegister(input));
+ DeoptimizeIf(eq, instr);
+ }
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -4282,13 +5130,13 @@
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
}
}
} else {
@@ -4296,64 +5144,115 @@
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ DeoptimizeIf(tag == 0 ? ne : eq, instr);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
- __ cmp(reg, Operand(target));
+ __ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Register scratch,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r0, scratch0());
+ }
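+ // kTryMigrateInstance returns a smi on failure; deoptimize in that case.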
+ __ tst(scratch0(), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
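+ // No code is emitted; instead, register a dependency on each map
+ // remaining stable.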
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ Register map_reg = scratch0();
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ Register reg = ToRegister(input);
+
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- __ CompareMap(reg, scratch, map, &success, mode);
- DeoptimizeIf(ne, env);
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(map_reg, map, &success);
+ __ b(eq, &success);
+ }
+
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(map_reg, map, &success);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ b(ne, deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr);
+ }
+
__ bind(&success);
}
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
- Register scratch = scratch0();
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
- Handle<Map> map = instr->hydrogen()->map();
- DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
- instr->environment());
-}
-
-
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}
@@ -4368,7 +5267,7 @@
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -4382,15 +5281,14 @@
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result_reg, Operand(0));
+ DeoptimizeIf(ne, instr);
+ __ mov(result_reg, Operand::Zero());
__ jmp(&done);
// Heap number
__ bind(&heap_number);
- __ vldr(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
__ jmp(&done);
// smi
@@ -4401,343 +5299,166 @@
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- __ LoadHeapObject(temp1, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DwVfpRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ VmovHigh(result_reg, value_reg);
+ } else {
+ __ VmovLow(result_reg, value_reg);
}
-
- // Check the holder map.
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- DeoptimizeIf(ne, instr->environment());
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ __ VmovHigh(result_reg, hi_reg);
+ __ VmovLow(result_reg, lo_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
- LAllocateObject* instr_;
+ LAllocate* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
- Register scratch2 = ToRegister(instr->TempAt(1));
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ str(scratch, FieldMemOperand(result, property_offset));
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
}
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
}
__ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(scratch, Operand(size - kHeapObjectTag));
+ } else {
+ __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+ }
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ Label loop;
+ __ bind(&loop);
+ __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
+ __ str(scratch2, MemOperand(result, scratch));
+ __ b(ge, &loop);
+ }
}
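The prefill loop above walks the allocation from the top down, storing the one-pointer filler map into every word. As plain C++, under the assumption that `base` is the untagged start of the allocation (the sub of kHeapObjectTag undoes the tag) and that the size is pointer-aligned:

#include <cstddef>
#include <cstdint>

// Fill [base, base + size_in_bytes) with a filler word, highest address
// first, mirroring the sub/str/b(ge) loop in DoAllocate.
static void PrefillWithFiller(uintptr_t* base, size_t size_in_bytes,
                              uintptr_t filler) {
  for (ptrdiff_t offset = static_cast<ptrdiff_t>(size_in_bytes) -
                          static_cast<ptrdiff_t>(sizeof(uintptr_t));
       offset >= 0; offset -= sizeof(uintptr_t)) {
    base[offset / sizeof(uintptr_t)] = filler;
  }
}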
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand(Smi::FromInt(0)));
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ LoadHeapObject(r0, constructor);
- __ push(r0);
- CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ DCHECK(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ Push(Smi::FromInt(size));
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
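DoDeferredAllocate packs the allocation parameters into one Smi-tagged flags word before calling Runtime::kAllocateInTargetSpace. A sketch of that encoding with illustrative bit positions; the real layouts come from the AllocateDoubleAlignFlag and AllocateTargetSpace BitFields and may differ:

// Illustrative only: field widths and positions here are assumptions,
// not the actual BitField layout in V8.
enum TargetSpace { NEW_SPACE = 0, OLD_POINTER_SPACE = 1, OLD_DATA_SPACE = 2 };

static int EncodeAllocateFlags(bool must_double_align, TargetSpace space) {
  int flags = must_double_align ? 1 : 0;  // bit 0: double alignment
  flags |= static_cast<int>(space) << 1;  // next bits: target space
  return flags;
}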
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
- __ Push(r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset) {
- ASSERT(!source.is(r2));
- ASSERT(!result.is(r2));
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
- int elements_size = has_elements ? elements->Size() : 0;
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ add(r2, result, Operand(elements_offset));
- } else {
- __ ldr(r2, FieldMemOperand(source, i));
- }
- __ str(r2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(r2, Operand(value_low));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ mov(r2, Operand(value_high));
- __ str(r2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r4, literals);
- __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ mov(r1, Operand(Smi::FromInt(flags)));
- __ Push(r4, r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
+ DCHECK(ToRegister(instr->value()).is(r0));
__ push(r0);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
- // r3 = JS function.
- // r7 = literals array.
+ // r6 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
+ // r2-r5 are used as temporaries.

+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ Move(r6, instr->hydrogen()->literals());
+ __ ldr(r1, FieldMemOperand(r6, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function
// Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
+ __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r4, Operand(instr->hydrogen()->pattern()));
+ __ mov(r3, Operand(instr->hydrogen()->flags()));
+ __ Push(r6, r5, r4, r3);
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
@@ -4745,7 +5466,7 @@
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -4756,35 +5477,24 @@
__ bind(&allocated);
// Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ldr(r3, FieldMemOperand(r1, i));
- __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
- __ str(r3, FieldMemOperand(r0, i));
- __ str(r2, FieldMemOperand(r0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
- __ str(r3, FieldMemOperand(r0, size - kPointerSize));
- }
+ __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
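CopyFields replaces the hand-unrolled word copy the old code emitted. Its observable effect is a straight field-by-field copy of tagged words; the double scratch register it takes is a codegen detail not visible at this level. A sketch:

#include <cstddef>
#include <cstdint>

// Copy `field_count` pointer-sized fields from src to dst, the effect of
// __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize).
static void CopyFields(uintptr_t* dst, const uintptr_t* src,
                       size_t field_count) {
  for (size_t i = 0; i < field_count; ++i) dst[i] = src[i];
}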
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literal cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ mov(r1, Operand(shared_info));
- __ push(r1);
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
+ __ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
- __ mov(r2, Operand(shared_info));
- __ mov(r1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
+ __ mov(r2, Operand(instr->hydrogen()->shared_info()));
+ __ mov(r1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
__ Push(cp, r2, r1);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
@@ -4792,25 +5502,21 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ Register input = ToRegister(instr->value());
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
+ Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_),
input,
instr->type_literal());
if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -4821,63 +5527,64 @@
Handle<String> type_name) {
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(input, Operand(ip));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ b(eq, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+ __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
__ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
+ Register map = scratch;
__ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, true_label);
- }
- __ CompareObjectType(input, input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, false_label);
- __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, false_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ b(eq, true_label);
+ __ CheckObjectTypeRange(input,
+ map,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ false_label);
// Check for undetectable objects => false.
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
} else {
@@ -4889,45 +5596,43 @@
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Register temp1 = ToRegister(instr->temp());
EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- ASSERT(!temp1.is(temp2));
+ DCHECK(!temp1.is(temp2));
// Get the frame pointer for the calling frame.
__ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
__ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
__ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
- __ bind(&check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
__ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
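EmitIsConstructCall now folds the adaptor-frame skip into a conditionally executed ldr instead of a branch. The walk it performs, sketched with an illustrative frame struct (the real code reads raw offsets off fp via StandardFrameConstants):

#include <cstdint>

// Illustrative frame layout; field order and types are assumptions.
struct Frame {
  Frame* caller_fp;   // StandardFrameConstants::kCallerFPOffset
  intptr_t context;   // kContextOffset; adaptor frames store a marker Smi
  intptr_t marker;    // kMarkerOffset; Smi-tagged StackFrame type
};

static bool IsConstructCall(const Frame* current, intptr_t adaptor_marker,
                            intptr_t construct_marker) {
  const Frame* caller = current->caller_fp;
  // Skip the arguments adaptor frame if one is interposed; this is the
  // eq-predicated ldr above.
  if (caller->context == adaptor_marker) caller = caller->caller_fp;
  return caller->marker == construct_marker;
}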
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
}
}
last_lazy_deopt_pc_ = masm()->pc_offset();
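The padding requirement is simple arithmetic: if fewer than `space_needed` bytes of code were emitted since the last lazy-deopt site, the gap is topped up with nops, in whole instructions. As a sketch:

// Number of nops needed so the patcher has `space_needed` bytes to work
// with. The padding must be a whole number of instructions, which is what
// the DCHECK_EQ above asserts.
static int PaddingNopCount(int current_pc, int last_lazy_deopt_pc,
                           int space_needed, int instr_size) {
  int padding = last_lazy_deopt_pc + space_needed - current_pc;
  return padding > 0 ? padding / instr_size : 0;
}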
@@ -4935,8 +5640,8 @@
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
- ASSERT(instr->HasEnvironment());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -4944,60 +5649,55 @@
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment());
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (they
+ // need the return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
}
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
// There is no LLazyBailout instruction for stack-checks. We have to
// prepare for lazy deoptimization explicitly here.
@@ -5007,21 +5707,22 @@
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm(),
+ CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
+ DCHECK(instr->context()->IsRegister());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
+ DCHECK(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5037,34 +5738,32 @@
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
+ DCHECK(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ GenerateOsrPrologue();
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
- __ tst(r0, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ __ SmiTst(r0);
+ DeoptimizeIf(eq, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -5080,7 +5779,7 @@
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&use_cache);
}
@@ -5088,13 +5787,23 @@
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Operand(Smi::FromInt(0)));
+ __ b(ne, &load_cache);
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ jmp(&done);
+
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmp(result, Operand(0));
- DeoptimizeIf(eq, instr->environment());
+ __ cmp(result, Operand::Zero());
+ DeoptimizeIf(eq, instr);
+
+ __ bind(&done);
}
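DoForInCacheArray gained a fast path: a map whose EnumLength is zero yields the empty fixed array directly, and only otherwise is the descriptor array's enum cache loaded, deoptimizing if the requested entry is zero. A control-flow sketch with stand-in types:

#include <cstddef>

// Stand-in types; `deopt` models the cmp/DeoptimizeIf(eq) pair above.
struct EnumCacheResult { const void* array; bool deopt; };

static EnumCacheResult LoadForInCache(int enum_length, const void* cache_entry,
                                      const void* empty_fixed_array) {
  if (enum_length == 0) return {empty_fixed_array, false};  // new fast path
  if (cache_entry == nullptr) return {nullptr, true};       // deoptimize
  return {cache_entry, false};
}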
@@ -5103,22 +5812,69 @@
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object);
+ __ Push(index);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
- __ cmp(index, Operand(0));
+
+ __ tst(index, Operand(Smi::FromInt(1)));
+ __ b(ne, deferred->entry());
+ __ mov(index, Operand(index, ASR, 1));
+
+ __ cmp(index, Operand::Zero());
__ b(lt, &out_of_object);
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ b(&done);
@@ -5126,13 +5882,30 @@
__ bind(&out_of_object);
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
// The index encodes the out-of-object property index k as -(k + 1).
- __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
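DoLoadFieldByIndex now decodes a richer index: bit 0 of the untagged Smi payload flags a mutable-double field (handled by the deferred runtime call), the remaining bits are arithmetically shifted down, and a negative result selects the out-of-object property store. Decoded as a sketch with illustrative names:

#include <cstdint>

// Decode the field index used by DoLoadFieldByIndex. `encoded` is the
// untagged Smi payload.
struct FieldRef {
  bool is_mutable_double;  // bit 0: load via Runtime::kLoadMutableDouble
  bool in_object;          // non-negative index selects an in-object slot
  int32_t slot;            // slot within the chosen backing store
};

static FieldRef DecodeFieldIndex(int32_t encoded) {
  FieldRef ref;
  ref.is_mutable_double = (encoded & 1) != 0;
  int32_t index = encoded >> 1;  // the ASR-by-1 in the assembly
  ref.in_object = index >= 0;
  ref.slot = ref.in_object ? index : (-index - 1);
  return ref;
}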
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index adb6e1b..cb137d1 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -1,38 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#include "arm/lithium-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
+#include "src/arm/lithium-arm.h"
+
+#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -41,36 +21,47 @@
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4),
- deopt_jump_table_(4),
- deoptimization_literals_(8),
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
- deferred_(8),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -80,13 +71,15 @@
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
+ DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
+ DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch);
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
@@ -94,6 +87,7 @@
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
@@ -106,28 +100,41 @@
void FinishCode(Handle<Code> code);
// Deferred code support.
- template<int T>
- void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ Label* map_check, Label* bool_load);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int base_offset);
+
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -137,30 +144,13 @@
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
+ StrictMode strict_mode() const { return info()->strict_mode(); }
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
+ LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
- int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -171,38 +161,47 @@
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
- void Comment(const char* format, ...);
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
+ int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode);
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
+ void CallCode(
+ Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
+
+ void CallCodeGeneric(
+ Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode,
+ TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -211,48 +210,55 @@
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
+
+ enum R1State {
+ R1_UNINITIALIZED,
+ R1_CONTAINS_TARGET
+ };
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
+ R1State r1_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail = NULL);
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
+ DwVfpRegister ToDoubleRegister(int index) const;
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ MemOperand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -264,19 +270,19 @@
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
+
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DwVfpRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -299,51 +305,41 @@
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
- struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
- : label(),
- address(entry) { }
- Label label;
- Address address;
- };
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- void EnsureSpaceForLazyDeopt();
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
- LChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -354,39 +350,19 @@
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
- PushSafepointRegistersScope(LCodeGen* codegen,
- Safepoint::Kind kind)
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = kind;
-
- switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ codegen_->masm_->PushSafepointRegisters();
}
~PushSafepointRegistersScope() {
- Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
- ASSERT((kind & Safepoint::kWithRegisters) != 0);
- switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ codegen_->masm_->PopSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
@@ -401,7 +377,7 @@
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -410,7 +386,7 @@
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index cefca47..2fceec9 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -1,47 +1,35 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-gap-resolver-arm.h"
namespace v8 {
namespace internal {
-static const Register kSavedValueRegister = { 9 };
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list, and
+// using the root register has two advantages:
+// - It is not in the crankshaft allocatable registers list, so it can't
+// interfere with any of the moves we are resolving.
+// - We don't need to push it on the stack, as we can reload it with its value
+// once we have resolved a cycle.
+#define kSavedValueRegister kRootRegister
+
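Borrowing the root register as the spill slot is what lets the resolver break move cycles without touching the stack. The underlying algorithm, sketched on plain integers, with one spare value standing in for kSavedValueRegister:

#include <cstddef>
#include <vector>

// Resolve a move cycle s[c0] <- s[c1] <- ... <- s[ck] <- s[c0] using one
// spare value, as BreakCycle/RestoreValue do with kSavedValueRegister.
static void ResolveCycle(std::vector<int>& slots,
                         const std::vector<size_t>& cycle) {
  int saved = slots[cycle.front()];         // BreakCycle: spill one value
  for (size_t i = 0; i + 1 < cycle.size(); ++i) {
    slots[cycle[i]] = slots[cycle[i + 1]];  // perform the unblocked moves
  }
  slots[cycle.back()] = saved;              // RestoreValue
}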
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
- saved_destination_(NULL) { }
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
+ DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
@@ -62,11 +50,17 @@
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
+ DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
+ if (need_to_restore_root_) {
+ DCHECK(kSavedValueRegister.is(kRootRegister));
+ __ InitializeRootRegister();
+ need_to_restore_root_ = false;
+ }
+
moves_.Rewind(0);
}
@@ -79,7 +73,7 @@
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
@@ -100,13 +94,13 @@
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
+ DCHECK(!moves_[index].IsPending());
+ DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
@@ -133,7 +127,7 @@
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
+ DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
@@ -144,31 +138,32 @@
void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
-#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
+  // We save the source of that move in a register and remember its
+  // destination. Then we mark this move as resolved, so the cycle is
+  // broken and we can perform the other moves.
+ DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
+ need_to_restore_root_ = true;
__ mov(kSavedValueRegister, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
+ need_to_restore_root_ = true;
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
@@ -183,10 +178,9 @@
void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
+ DCHECK(in_cycle_);
+ DCHECK(saved_destination_ != NULL);
- // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
@@ -216,31 +210,25 @@
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
- ASSERT(destination->IsStackSlot());
+ DCHECK(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
-
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ ldr(cgen_->ToRegister(destination), source_operand);
} else {
- ASSERT(destination->IsStackSlot());
+ DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
- // Therefore we can't use ip. It is OK if the read from the source
- // destroys ip, since that happens before the value is read.
- __ vldr(kScratchDoubleReg.low(), source_operand);
- __ vstr(kScratchDoubleReg.low(), destination_operand);
- } else {
- __ ldr(ip, source_operand);
- __ str(ip, destination_operand);
- }
+ if (!destination_operand.OffsetIsUint12Encodable()) {
+ // ip is overwritten while saving the value to the destination.
+ // Therefore we can't use ip. It is OK if the read from the source
+ // destroys ip, since that happens before the value is read.
+ __ vldr(kScratchDoubleReg.low(), source_operand);
+ __ vstr(kScratchDoubleReg.low(), destination_operand);
} else {
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
+ __ ldr(ip, source_operand);
+ __ str(ip, destination_operand);
}
}
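
The OffsetIsUint12Encodable test used above reflects the ARM ldr/str immediate encoding: a 12-bit magnitude plus an add/subtract bit. A sketch of the predicate, assuming that magnitude-plus-sign encoding:

#include <cstdint>

// Offsets with |offset| < 4096 fit a single ldr/str; anything larger needs the
// address materialized in a register first (which is why ip gets clobbered).
bool OffsetIsUint12Encodable(int32_t offset) {
  uint32_t magnitude = offset >= 0 ? offset : -offset;
  return magnitude < (1u << 12);
}
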
@@ -248,30 +236,38 @@
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
+ __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ DwVfpRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ Vmov(result, v, ip);
} else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ DCHECK(destination->IsStackSlot());
+ DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
+ need_to_restore_root_ = true;
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
- Operand(cgen_->ToInteger32(constant_source)));
+ Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(kSavedValueRegister,
- cgen_->ToHandle(constant_source));
+ __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
- ASSERT(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsDoubleStackSlot());
__ vstr(source_register, cgen_->ToMemOperand(destination));
}
@@ -280,19 +276,14 @@
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
- ASSERT(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
- // kSavedDoubleValueRegister was used to break the cycle,
- // but kSavedValueRegister is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- __ ldr(kSavedValueRegister, source_high_operand);
- __ str(kSavedValueRegister, destination_high_operand);
+ // kScratchDoubleReg was used to break the cycle.
+ __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);
+ __ vldr(kScratchDoubleReg, source_operand);
+ __ vstr(kScratchDoubleReg, destination_operand);
+ __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);
} else {
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
diff --git a/src/arm/lithium-gap-resolver-arm.h b/src/arm/lithium-gap-resolver-arm.h
index 9dd09c8..9d7d843 100644
--- a/src/arm/lithium-gap-resolver-arm.h
+++ b/src/arm/lithium-gap-resolver-arm.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
@@ -38,7 +15,7 @@
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
@@ -76,6 +53,10 @@
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
};
} } // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 857c2bf..c845a3d 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,40 +1,21 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -42,7 +23,6 @@
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -51,68 +31,36 @@
}
-// We always generate arm code, never thumb code, even if V8 is compiled to
-// thumb, so we require inter-working support
-#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
-#error "flag -mthumb-interwork missing"
-#endif
-
-
-// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t). If you know what CPU you are compiling for
-// you can use -march=armv7 or similar.
-#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
-# error "For thumb inter-working we require an architecture which supports blx"
-#endif
-
-
-// Using bx does not yield better code, so use it only when required
-#if defined(USE_THUMB_INTERWORK)
-#define USE_BX 1
-#endif
-
-
void MacroAssembler::Jump(Register target, Condition cond) {
-#if USE_BX
bx(target, cond);
-#else
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
}
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
-#if USE_BX
- mov(ip, Operand(target, rmode));
- bx(ip, cond);
-#else
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif
}
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
int MacroAssembler::CallSize(Register target, Condition cond) {
-#if USE_BLX
return kInstrSize;
-#else
- return 2 * kInstrSize;
-#endif
}
@@ -121,40 +69,67 @@
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#if USE_BLX
blx(target, cond);
-#else
- // set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
- ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
+ DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(
Address target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
- size += kInstrSize;
- }
- return size;
+ Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+ return kInstrSize +
+ mov_operand.instructions_required(this, mov_instr) * kInstrSize;
+}
+
+
+int MacroAssembler::CallStubSize(
+ CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
+ return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
+ Address target,
+ RelocInfo::Mode rmode,
+ Condition cond) {
+ Instr mov_instr = cond | MOV | LeaveCC;
+ Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+ return kInstrSize +
+ mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
- Condition cond) {
+ Condition cond,
+ TargetAddressStorageMode mode) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#if USE_BLX
- // On ARMv5 and after the recommended call sequence is:
- // ldr ip, [pc, #...]
- // blx ip
+
+ bool old_predictable_code_size = predictable_code_size();
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(true);
+ }
+
+#ifdef DEBUG
+  // Check the expected size before generating code to ensure we assume the same
+  // constant pool availability (e.g., whether the constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
+#endif
+
+  // Call sequence on V7 or later may be:
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or for pre-V7 or values that may be back-patched
+ // to avoid ICache flushes:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
@@ -165,50 +140,42 @@
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
-#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
- ASSERT(kCallTargetAddressOffset == kInstrSize);
-#endif
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(old_predictable_code_size);
+ }
}
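
The two sequences described in the comment above differ only in how the target address reaches ip; both end in blx. A sketch of the size bookkeeping that CallSize and the DCHECK rely on (kInstrSize is 4 on ARM):

// movw/movt inlines the address in two instructions but needs an ICache flush
// when patched; ldr from the constant pool is one instruction plus a pool slot
// and can be back-patched without a flush.
int CallSequenceBytes(bool use_movw_movt) {
  const int kInstrSize = 4;
  int address_load_instrs = use_movw_movt ? 2 : 1;
  return (address_load_instrs + 1) * kInstrSize;  // +1 for blx ip
}
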
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond) {
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond) {
+ TypeFeedbackId ast_id,
+ Condition cond,
+ TargetAddressStorageMode mode) {
Label start;
bind(&start);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<Address>(code.location()), rmode, cond);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
- SizeOfCodeGeneratedSince(&start));
+ AllowDeferredHandleDereference embedding_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
void MacroAssembler::Ret(Condition cond) {
-#if USE_BX
bx(lr, cond);
-#else
- mov(pc, Operand(lr), LeaveCC, cond);
-#endif
}
@@ -253,7 +220,19 @@
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ mov(dst, Operand(value));
+ } else {
+ DCHECK(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ mov(dst, Operand(cell));
+ ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ mov(dst, Operand(value));
+ }
+ }
}
@@ -264,29 +243,38 @@
}
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
}
+void MacroAssembler::Mls(Register dst, Register src1, Register src2,
+ Register srcA, Condition cond) {
+ if (CpuFeatures::IsSupported(MLS)) {
+ CpuFeatureScope scope(this, MLS);
+ mls(dst, src1, src2, srcA, cond);
+ } else {
+ DCHECK(!srcA.is(ip));
+ mul(ip, src1, src2, LeaveCC, cond);
+ sub(dst, srcA, ip, LeaveCC, cond);
+ }
+}
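
The fallback above computes the same value as mls in two instructions. A self-contained model of that semantics:

#include <cassert>
#include <cstdint>

// dst = srcA - src1 * src2: mul into a scratch, then subtract, matching the
// mul/sub pair emitted when the MLS instruction is unavailable.
uint32_t MlsFallback(uint32_t src1, uint32_t src2, uint32_t srcA) {
  uint32_t scratch = src1 * src2;  // mul ip, src1, src2
  return srcA - scratch;           // sub dst, srcA, ip
}

int main() {
  assert(MlsFallback(3, 4, 20) == 8);  // 20 - 3*4
  return 0;
}
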
+
+
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
- !src2.must_use_constant_pool() &&
+ !src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
-
- } else if (!src2.is_single_instruction() &&
- !src2.must_use_constant_pool() &&
+ mov(dst, Operand::Zero(), LeaveCC, cond);
+ } else if (!(src2.instructions_required(this) == 1) &&
+ !src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
- IsPowerOf2(src2.immediate() + 1)) {
+ base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
-
} else {
and_(dst, src1, src2, LeaveCC, cond);
}
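
The middle branch above rewrites an AND as a bitfield extract when the immediate has the form (1 << n) - 1. A sketch of why that test identifies such masks:

#include <cassert>
#include <cstdint>

bool IsPowerOfTwo32(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// A mask of n low ones satisfies IsPowerOfTwo32(mask + 1); ANDing with it is
// exactly "ubfx dst, src, #0, #n".
uint32_t ExtractLowBits(uint32_t src, uint32_t mask) {
  assert(IsPowerOfTwo32(mask + 1));
  return src & mask;
}
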
@@ -295,8 +283,8 @@
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ DCHECK(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@@ -310,8 +298,8 @@
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ DCHECK(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@@ -334,12 +322,12 @@
int lsb,
int width,
Condition cond) {
- ASSERT(0 <= lsb && lsb < 32);
- ASSERT(0 <= width && width < 32);
- ASSERT(lsb + width < 32);
- ASSERT(!scratch.is(dst));
+ DCHECK(0 <= lsb && lsb < 32);
+ DCHECK(0 <= width && width < 32);
+ DCHECK(lsb + width < 32);
+ DCHECK(!scratch.is(dst));
if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@@ -351,12 +339,14 @@
}
-void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
+ Condition cond) {
+ DCHECK(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
+ bic(dst, src, Operand(mask));
} else {
+ Move(dst, src, cond);
bfc(dst, lsb, width, cond);
}
}
@@ -364,14 +354,14 @@
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
- ASSERT(!dst.is(pc) && !src.rm().is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
+ DCHECK(!dst.is(pc) && !src.rm().is(pc));
+ DCHECK((satpos >= 0) && (satpos <= 31));
// These asserts are required to ensure compatibility with the ARMv7
// implementation.
- ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
- ASSERT(src.rs().is(no_reg));
+ DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
+ DCHECK(src.rs().is(no_reg));
Label done;
int satval = (1 << satpos) - 1;
@@ -384,7 +374,7 @@
}
tst(dst, Operand(~satval));
b(eq, &done);
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
+ mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
bind(&done);
} else {
@@ -393,9 +383,55 @@
}
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8()) {
+ ldrsb(dst, src);
+ } else if (r.IsUInteger8()) {
+ ldrb(dst, src);
+ } else if (r.IsInteger16()) {
+ ldrsh(dst, src);
+ } else if (r.IsUInteger16()) {
+ ldrh(dst, src);
+ } else {
+ ldr(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ strb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ strh(src, dst);
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ str(src, dst);
+ }
+}
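
The representation-directed loads above pick between sign- and zero-extension. A host-side model of what the four narrow forms produce (hypothetical helper names):

#include <cassert>
#include <cstdint>

int32_t  LoadInteger8(const void* p)   { return *static_cast<const int8_t*>(p); }   // ldrsb
uint32_t LoadUInteger8(const void* p)  { return *static_cast<const uint8_t*>(p); }  // ldrb
int32_t  LoadInteger16(const void* p)  { return *static_cast<const int16_t*>(p); }  // ldrsh
uint32_t LoadUInteger16(const void* p) { return *static_cast<const uint16_t*>(p); } // ldrh

int main() {
  uint8_t byte = 0xFF;
  assert(LoadInteger8(&byte) == -1);    // sign-extended
  assert(LoadUInteger8(&byte) == 255);  // zero-extended
  return 0;
}
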
+
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
+ if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
+ !predictable_code_size()) {
+ // The CPU supports fast immediate values, and this root will never
+ // change. We will load it as a relocatable immediate value.
+ Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, Operand(root), LeaveCC, cond);
+ return;
+ }
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@@ -407,24 +443,11 @@
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- mov(result, Operand(object));
- }
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
Label* branch) {
- ASSERT(cond == eq || cond == ne);
+ DCHECK(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
b(cond, branch);
@@ -439,7 +462,8 @@
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -451,7 +475,7 @@
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
add(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
@@ -468,15 +492,86 @@
lr_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+ mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ cmp(dst, Operand(isolate()->factory()->meta_map()));
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
+ cmp(ip, map);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ Label done;
+
+  // A single check of the interesting flag on the map's page suffices, since
+  // the flag is only set during incremental collection, and then it's also
+  // guaranteed that the from-object's page's interesting flag is also set.
+  // This optimization relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+
+ add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ b(eq, &ok);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(lr);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(lr);
+ }
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -484,37 +579,42 @@
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!object.is(value));
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
- Check(eq, "Wrong address or value passed to RecordWrite");
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- tst(value, Operand(kSmiTagMask));
- b(eq, &done);
+ JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ }
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -525,7 +625,8 @@
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
@@ -533,11 +634,16 @@
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
+ value);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
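
As a rough model of the filtering above (flag names here are descriptive placeholders, not V8's): the expensive stub runs only for non-smi values when both page flags say the store could matter to the collector.

bool NeedsWriteBarrierStub(bool value_is_smi,
                           bool value_page_to_here_interesting,
                           bool object_page_from_here_interesting) {
  if (value_is_smi) return false;  // INLINE_SMI_CHECK: JumpIfSmi(value, &done)
  return value_page_to_here_interesting && object_page_from_here_interesting;
}
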
@@ -569,12 +675,11 @@
if (and_then == kFallThroughAtEnd) {
b(eq, &done);
} else {
- ASSERT(and_then == kReturnAtEnd);
+ DCHECK(and_then == kReturnAtEnd);
Ret(eq);
}
push(lr);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
@@ -584,14 +689,34 @@
}
+void MacroAssembler::PushFixedFrame(Register marker_reg) {
+ DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+ stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
+ cp.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() |
+ lr.bit());
+}
+
+
+void MacroAssembler::PopFixedFrame(Register marker_reg) {
+ DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+ ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
+ cp.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() |
+ lr.bit());
+}
+
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
- ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+ DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
+ DCHECK(num_unsaved >= 0);
sub(sp, sp, Operand(num_unsaved * kPointerSize));
stm(db_w, sp, kSafepointSavedRegisters);
}
@@ -604,31 +729,6 @@
}
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
- kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
- vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
- vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
- kDoubleSize));
- PopSafepointRegisters();
-}
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
- Register dst) {
- str(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
str(src, SafepointRegisterSlot(dst));
}
@@ -642,7 +742,7 @@
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+ DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
@@ -653,8 +753,10 @@
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // Number of d-regs not known at snapshot time.
+ DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
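
Since the general purpose registers are pushed after the doubles, a register's slot sits above the whole double block. A sketch of the offset arithmetic, assuming 4-byte pointers and 8-byte doubles as on 32-bit ARM:

int SafepointRegistersAndDoublesSlotOffset(int reg_code,
                                           int num_allocatable_doubles) {
  const int kPointerSize = 4;
  const int kDoubleSize = 8;
  int doubles_size = num_allocatable_doubles * kDoubleSize;
  return doubles_size + reg_code * kPointerSize;  // offset from sp
}
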
@@ -662,18 +764,17 @@
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
+ DCHECK(src.rm().is(no_reg));
+ DCHECK(!dst1.is(lr)); // r14.
// V8 does not use this addressing mode, so the fallback code
// below doesn't support it yet.
- ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
+ DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatures::Scope scope(ARMv7);
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
+ (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
+ CpuFeatureScope scope(this, ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
if ((src.am() == Offset) || (src.am() == NegOffset)) {
@@ -687,7 +788,7 @@
ldr(dst2, src2, cond);
}
} else { // PostIndex or NegPostIndex.
- ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
+ DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
if (dst1.is(src.rn())) {
ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
ldr(dst1, src, cond);
@@ -704,18 +805,17 @@
void MacroAssembler::Strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
+ DCHECK(dst.rm().is(no_reg));
+ DCHECK(!src1.is(lr)); // r14.
// V8 does not use this addressing mode, so the fallback code
// below doesn't support it yet.
- ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
+ DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatures::Scope scope(ARMv7);
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
+ (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
+ CpuFeatureScope scope(this, ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
@@ -724,7 +824,7 @@
str(src1, dst, cond);
str(src2, dst2, cond);
} else { // PostIndex or NegPostIndex.
- ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
+ DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
dst2.set_offset(dst2.offset() - 4);
str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
str(src2, dst2, cond);
@@ -733,12 +833,30 @@
}
-void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond) {
- vmrs(scratch, cond);
- bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
- vmsr(scratch, cond);
+void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
+ // If needed, restore wanted bits of FPSCR.
+ Label fpscr_done;
+ vmrs(scratch);
+ if (emit_debug_code()) {
+ Label rounding_mode_correct;
+ tst(scratch, Operand(kVFPRoundingModeMask));
+ b(eq, &rounding_mode_correct);
+ // Don't call Assert here, since Runtime_Abort could re-enter here.
+ stop("Default rounding mode not set");
+ bind(&rounding_mode_correct);
+ }
+ tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
+ b(ne, &fpscr_done);
+ orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
+ vmsr(scratch);
+ bind(&fpscr_done);
+}
+
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ vsub(dst, src, kDoubleRegZero, cond);
}
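
VFPCanonicalizeNaN works because, with the FPSCR default-NaN bit set (ensured above), any NaN operand to vsub yields the canonical quiet NaN. A bit-level model of the effect:

#include <cstdint>

uint64_t CanonicalizeNaNBits(uint64_t bits) {
  const uint64_t kExpMask  = 0x7FF0000000000000ull;
  const uint64_t kMantMask = 0x000FFFFFFFFFFFFFull;
  bool is_nan = (bits & kExpMask) == kExpMask && (bits & kMantMask) != 0;
  return is_nan ? 0x7FF8000000000000ull : bits;  // ARM default quiet NaN
}
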
@@ -777,58 +895,163 @@
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ const Register scratch) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
- if (value.bits == zero.bits) {
- vmov(dst, kDoubleRegZero, cond);
- } else if (value.bits == minus_zero.bits) {
- vneg(dst, kDoubleRegZero, cond);
+ if (value_rep == zero) {
+ vmov(dst, kDoubleRegZero);
+ } else if (value_rep == minus_zero) {
+ vneg(dst, kDoubleRegZero);
} else {
- vmov(dst, imm, cond);
+ vmov(dst, imm, scratch);
}
}
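
Vmov dispatches on the bit representation because 0.0 and -0.0 compare equal with == yet differ in the sign bit, and the sign decides which instruction (vmov vs. vneg of kDoubleRegZero) is correct:

#include <cstdint>
#include <cstring>

uint64_t Bits(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof b);
  return b;
}
bool IsZeroRepresentation(double d)      { return Bits(d) == Bits(0.0); }
bool IsMinusZeroRepresentation(double d) { return Bits(d) == Bits(-0.0); }
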
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.high());
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.high(), src);
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.low());
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
+void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.low(), src);
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
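
The code() < 16 tests above come from the VFP register file layout: d0-d15 alias the single-precision registers (dN is the pair s2N/s2N+1), while d16-d31 have no s-register view and need an indexed vmov. A sketch of the mapping:

int LowSRegisterCode(int d_code)  { return d_code * 2; }      // low 32 bits
int HighSRegisterCode(int d_code) { return d_code * 2 + 1; }  // high 32 bits
bool HasSRegisterView(int d_code) { return d_code < 16; }
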
+
+
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ if (FLAG_enable_ool_constant_pool) {
+ int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
+ pc_offset() - Instruction::kPCReadOffset;
+ DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+ ldr(pp, MemOperand(pc, constant_pool_offset));
+ }
+}
+
+
+void MacroAssembler::StubPrologue() {
+ PushFixedFrame();
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ if (FLAG_enable_ool_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
+ }
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+  { PredictableCodeSizeScope predictable_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ add(r0, pc, Operand(-8));
+ ldr(pc, MemOperand(pc, -4));
+ emit_code_stub_address(stub);
+ } else {
+ PushFixedFrame(r1);
+ nop(ip.code());
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
+ }
+ if (FLAG_enable_ool_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool) {
// r0-r3: preserved
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ PushFixedFrame();
+ if (FLAG_enable_ool_constant_pool && load_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ }
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
+ // Adjust FP to point to saved FP.
+ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// r0: preserved
// r1: preserved
// r2: preserved
// Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- mov(sp, fp);
- ldm(ia_w, sp, fp.bit() | lr.bit());
+ // the caller frame pointer, return address and constant pool pointer
+ // (if FLAG_enable_ool_constant_pool).
+ int frame_ends;
+ if (FLAG_enable_ool_constant_pool) {
+ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ frame_ends = pc_offset();
+ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ mov(sp, fp);
+ frame_ends = pc_offset();
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
+ return frame_ends;
}
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
- ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
mov(fp, Operand(sp)); // Set up new frame pointer.
// Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(2 * kPointerSize));
+ sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
if (emit_debug_code()) {
- mov(ip, Operand(0));
+ mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
+ if (FLAG_enable_ool_constant_pool) {
+ str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -840,13 +1063,12 @@
// Optionally save all double registers.
if (save_doubles) {
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vstm(db_w, sp, first, last);
+ SaveFPRegs(sp, ip);
// Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
+ // fp - ExitFrameConstants::kFrameSize -
+ // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
+ // since the sp slot, code slot and constant pool slot (if
+ // FLAG_enable_ool_constant_pool) were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@@ -854,7 +1076,7 @@
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
}
@@ -870,7 +1092,7 @@
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
- mov(scratch1, Operand(length, LSL, kSmiTagSize));
+ SmiTag(scratch1, length);
LoadRoot(scratch2, map_index);
str(scratch1, FieldMemOperand(string, String::kLengthOffset));
mov(scratch1, Operand(String::kEmptyHashField));
@@ -880,48 +1102,55 @@
int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
+ return base::OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_ARM
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots, we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_ARM)
+#endif // V8_HOST_ARCH_ARM
}
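
The and_(sp, sp, Operand(-frame_alignment)) idiom used when setting up exit frames rounds sp down to a power-of-two boundary; in two's complement, -a equals ~(a - 1):

#include <cassert>
#include <cstdint>

uint32_t AlignDown(uint32_t sp, uint32_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  return sp & ~(alignment - 1);  // same bits as sp & (uint32_t)-alignment
}
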
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool restore_context) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int offset = 2 * kPointerSize;
- sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vldm(ia, r3, first, last);
+ const int offset = ExitFrameConstants::kFrameSize;
+ sub(r3, fp,
+ Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
+ RestoreFPRegs(r3, ip);
}
// Clear top frame.
- mov(r3, Operand(0, RelocInfo::NONE));
+ mov(r3, Operand::Zero());
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ ldr(cp, MemOperand(ip));
+ }
#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
str(r3, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return.
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
if (argument_count.is_valid()) {
@@ -929,7 +1158,8 @@
}
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+
+void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -938,17 +1168,9 @@
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be r5 to
- // follow the calling convention which requires the call type to be
- // in r5.
- ASSERT(dst.is(r5));
- if (call_kind == CALL_AS_FUNCTION) {
- mov(dst, Operand(Smi::FromInt(1)));
- } else {
- mov(dst, Operand(Smi::FromInt(0)));
- }
+// On ARM this is just a synonym to make the purpose clear.
+void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
+ MovFromFloatResult(dst);
}
@@ -959,8 +1181,7 @@
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -970,17 +1191,16 @@
// r0: actual arguments count
// r1: function (passed through to callee)
// r2: expected arguments count
- // r3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(r0));
- ASSERT(expected.is_immediate() || expected.reg().is(r2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
+ DCHECK(actual.is_immediate() || actual.reg().is(r0));
+ DCHECK(expected.is_immediate() || expected.reg().is(r2));
+ DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
@@ -1018,14 +1238,12 @@
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(r5, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
b(done);
}
} else {
- SetCallKind(r5, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(®ular_invoke);
@@ -1037,25 +1255,22 @@
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(code);
}
@@ -1066,46 +1281,15 @@
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
- ASSERT(fun.is(r1));
+ DCHECK(fun.is(r1));
Register expected_reg = r2;
Register code_reg = r3;
@@ -1115,33 +1299,44 @@
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
+ SmiUntag(expected_reg);
ldr(code_reg,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in r1.
+ DCHECK(function.is(r1));
// Get the function and setup the context.
- LoadHeapObject(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(r3, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ Move(r1, function);
+ InvokeFunction(r1, expected, actual, flag, call_wrapper);
}
@@ -1168,7 +1363,7 @@
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
- ASSERT(kNotStringTag != 0);
+ DCHECK(kNotStringTag != 0);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -1177,15 +1372,23 @@
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail) {
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ cmp(scratch, Operand(LAST_NAME_TYPE));
+ b(hi, fail);
+}
+
+
void MacroAssembler::DebugBreak() {
- mov(r0, Operand(0, RelocInfo::NONE));
+ mov(r0, Operand::Zero());
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
+ CEntryStub ces(isolate(), 1);
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
@@ -1198,7 +1401,7 @@
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+  // For the JSEntry handler, we must preserve r0-r4; r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
@@ -1209,9 +1412,9 @@
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(ip, Operand::Zero()); // NULL frame pointer.
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
@@ -1238,12 +1441,17 @@
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
+
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
+ }
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
+ add(pc, r1, Operand::SmiUntag(r2)); // Jump.
}
@@ -1326,61 +1534,60 @@
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(ip));
- ASSERT(!scratch.is(ip));
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!holder_reg.is(ip));
+ DCHECK(!scratch.is(ip));
// Load current lexical context from the stack frame.
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- cmp(scratch, Operand(0, RelocInfo::NONE));
- Check(ne, "we should not have an empty lexical context");
+ cmp(scratch, Operand::Zero());
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
cmp(scratch, Operand(ip));
b(eq, &same_contexts);
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, ip); // Move ip to its holding place.
LoadRoot(ip, Heap::kNullValueRootIndex);
cmp(holder_reg, ip);
- Check(ne, "JSGlobalProxy::context() should not be null.");
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull);
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
// Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@@ -1398,6 +1605,9 @@
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
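For reference, the computation the (elided) body of GetNumberHash performs, as
a scalar sketch; this mirrors the Jenkins-style ComputeIntegerHash, and the
final 30-bit mask is an assumption about this era's hash-field layout:

  uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;
    hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;           // hash = (hash + (hash << 3)) + (hash << 11);
    hash = hash ^ (hash >> 16);
    return hash & 0x3fffffff;     // assumed: hash fields keep 30 value bits
  }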
@@ -1460,12 +1670,11 @@
// Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
+ SmiUntag(t1);
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -1475,14 +1684,14 @@
and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
- if (i != kProbes - 1) {
+ if (i != kNumberDictionaryProbes - 1) {
b(eq, &done);
} else {
b(ne, miss);
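The unrolled loop implements quadratic probing; a scalar sketch of the slot
arithmetic emitted per probe (kEntrySize is 3 pointers, per the DCHECK above):

  // For probe i, the slot index is (hash + i + i*i) & mask, and each
  // dictionary entry spans kEntrySize pointer-sized fields.
  int ProbeOffset(uint32_t hash, int i, uint32_t capacity_mask) {
    uint32_t index = (hash + i + i * i) & capacity_mask;
    return index * SeededNumberDictionary::kEntrySize;  // == index * 3
  }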
@@ -1505,12 +1714,13 @@
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1522,38 +1732,37 @@
return;
}
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
+ DCHECK(!scratch1.is(ip));
+ DCHECK(!scratch2.is(ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- // Set up allocation top address and object size registers.
+ intptr_t top =
+ reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
+ DCHECK(result.code() < ip.code());
+
+ // Set up allocation top address register.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- mov(topaddr, Operand(new_space_allocation_top));
- mov(obj_size_reg, Operand(object_size));
+ mov(topaddr, Operand(allocation_top));
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1567,15 +1776,50 @@
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
- Check(eq, "Unexpected allocation top");
+ Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
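A scalar sketch of the fix-up above: when the bump pointer is only
pointer-aligned, one filler word is written so the payload starts on an 8-byte
boundary (the constant names are taken from the surrounding code):

  uint32_t AlignTopForDouble(uint32_t top, uint32_t one_pointer_filler_map) {
    if ((top & kDoubleAlignmentMask) != 0) {              // off by one pointer
      *reinterpret_cast<uint32_t*>(top) = one_pointer_filler_map;  // dead word
      top += kDoubleSize / 2;                             // skip the filler
    }
    return top;
  }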
// Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- add(scratch2, result, Operand(obj_size_reg), SetCC);
+ // to calculate the new top. We must preserve the ip register at this
+ // point, so we cannot just use add().
+ DCHECK(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ DCHECK(bits_operand.instructions_required(this) == 1);
+ add(scratch2, source, bits_operand, SetCC, cond);
+ source = scratch2;
+ cond = cc;
+ }
+ }
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
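The unusual add loop above exists because ip must stay live (so the size cannot
go through the usual literal path) and ARM data-processing immediates are an
8-bit value at an even bit position. A sketch of the decomposition it performs:

  #include <cstdint>
  #include <vector>

  // Split `size` into addends that each encode as a single ARM Operand2
  // immediate; the emitted code does one conditional add per part.
  std::vector<uint32_t> SplitIntoArmImmediates(uint32_t size) {
    std::vector<uint32_t> parts;
    int shift = 0;
    while (size != 0) {
      if (((size >> shift) & 0x03) == 0) {
        shift += 2;  // Rotations are even, so skip two zero bits at a time.
      } else {
        uint32_t bits = size & (0xffu << shift);
        parts.push_back(bits);
        size -= bits;
        shift += 8;
      }
    }
    return parts;
  }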
@@ -1588,12 +1832,12 @@
}
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1607,32 +1851,32 @@
// Assert that the register arguments are different and that none of
// them are ip. ip is used explicitly in the code generated below.
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!object_size.is(ip));
- ASSERT(!result.is(ip));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
+ DCHECK(!object_size.is(ip));
+ DCHECK(!result.is(ip));
+ DCHECK(!scratch1.is(ip));
+ DCHECK(!scratch2.is(ip));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
+ DCHECK(result.code() < ip.code());
// Set up allocation top address.
Register topaddr = scratch1;
- mov(topaddr, Operand(new_space_allocation_top));
+ mov(topaddr, Operand(allocation_top));
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1646,12 +1890,29 @@
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
- Check(eq, "Unexpected allocation top");
+ Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -1667,7 +1928,7 @@
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space");
+ Check(eq, kUnalignedAllocationInNewSpace);
}
str(scratch2, MemOperand(topaddr));
@@ -1690,7 +1951,7 @@
mov(scratch, Operand(new_space_allocation_top));
ldr(scratch, MemOperand(scratch));
cmp(object, scratch);
- Check(lt, "Undo allocation of non allocated memory");
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
// Write the address of the object to un-allocate as the current top.
mov(scratch, Operand(new_space_allocation_top));
@@ -1706,19 +1967,19 @@
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
add(scratch1, scratch1,
Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -1729,34 +1990,29 @@
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
+ DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK(kCharSize == 1);
add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ // Allocate one-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -1765,12 +2021,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -1780,23 +2032,19 @@
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -1805,12 +2053,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -1820,23 +2064,16 @@
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -1844,14 +2081,36 @@
Register map,
Register type_reg,
InstanceType type) {
+ const Register temp = type_reg.is(no_reg) ? ip : type_reg;
+
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
+ CompareInstanceType(map, temp, type);
+}
+
+
+void MacroAssembler::CheckObjectTypeRange(Register object,
+ Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label) {
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
+ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ sub(ip, ip, Operand(min_type));
+ cmp(ip, Operand(max_type - min_type));
+ b(hi, false_label);
}
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
+ // Registers map and type_reg can be ip. These two lines assert
+ // that ip can be used with the two instructions (the constants
+ // will never need ip).
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
@@ -1859,7 +2118,7 @@
void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
- ASSERT(!obj.is(ip));
+ DCHECK(!obj.is(ip));
LoadRoot(ip, index);
cmp(obj, ip);
}
@@ -1868,10 +2127,12 @@
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
@@ -1879,38 +2140,38 @@
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register receiver_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ LowDwVfpRegister double_scratch,
+ Label* fail,
+ int elements_offset) {
+ Label smi_value, store;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
@@ -1922,99 +2183,40 @@
fail,
DONT_DO_SMI_CHECK);
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- cmp(exponent_reg, scratch1);
- b(ge, &maybe_nan);
-
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- str(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- b(gt, &is_nan);
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand(0));
- b(eq, &have_double_value);
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
+ vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ // Force a canonical NaN.
+ if (emit_debug_code()) {
+ vmrs(ip);
+ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+ Assert(ne, kDefaultNaNModeNotSet);
+ }
+ VFPCanonicalizeNaN(double_scratch);
+ b(&store);
bind(&smi_value);
- add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- add(scratch1, scratch1,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch1 is now effective address of the double element
+ SmiToDouble(double_scratch, value_reg);
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- vstr(d0, scratch1, 0);
- } else {
- str(mantissa_reg, MemOperand(scratch1, 0));
- str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
+ bind(&store);
+ add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
+ vstr(double_scratch,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
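The rewritten store path relies on the VFP default-NaN mode (asserted via the
FPSCR bit above): VFPCanonicalizeNaN forces any NaN payload to the single
canonical pattern before the vstr, so the hole NaN can never be forged. A
scalar sketch of the intended semantics:

  #include <cmath>
  #include <limits>

  // Assumed invariant: values stored into a FixedDoubleArray never carry
  // arbitrary NaN payloads (the hole is encoded as a distinct NaN).
  double CanonicalizeNaN(double d) {
    return std::isnan(d) ? std::numeric_limits<double>::quiet_NaN() : d;
  }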
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- cmp(scratch, Operand(map));
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
- }
+ CompareMap(scratch, map, early_success);
+}
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
- }
- }
+
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success) {
+ cmp(obj_map, Operand(map));
}
@@ -2022,14 +2224,13 @@
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, scratch, map, &success, mode);
+ CompareMap(obj, scratch, map, &success);
b(ne, fail);
bind(&success);
}
@@ -2072,14 +2273,15 @@
Register scratch,
Label* miss,
bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- b(ne, miss);
-
+ Label non_instance;
if (miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+ b(ne, miss);
+
ldr(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
ldr(scratch,
@@ -2087,13 +2289,12 @@
tst(scratch,
Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
b(ne, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- b(ne, &non_instance);
+ // Make sure that the function has an instance prototype.
+ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ b(ne, &non_instance);
+ }
// Get the prototype or initial map from the function.
ldr(result,
@@ -2113,26 +2314,30 @@
// Get the prototype from the initial map.
ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ if (miss_on_bound_function) {
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ }
// All done.
bind(&done);
}
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
+void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
+ Condition cond) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
@@ -2142,53 +2347,91 @@
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ DCHECK(function_address.is(r1) || function_address.is(r2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
+ ldrb(r9, MemOperand(r9, 0));
+ cmp(r9, Operand(0));
+ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ mov(r3, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ Move(r3, function_address);
+ bind(&end_profiler_check);
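The new prologue picks the call target once, up front; a hypothetical C++
equivalent (is_profiling_address is assumed to point at a byte-sized flag):

  #include <cstdint>

  using ApiFunction = void (*)();

  // When the profiler is active, route through the thunk, which logs
  // entry/exit around the real callback; otherwise call it directly.
  ApiFunction SelectApiCallTarget(const uint8_t* is_profiling,
                                  ApiFunction thunk, ApiFunction callback) {
    return *is_profiling ? thunk : callback;
  }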
+
// Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
+ mov(r9, Operand(next_address));
+ ldr(r4, MemOperand(r9, kNextOffset));
+ ldr(r5, MemOperand(r9, kLimitOffset));
+ ldr(r6, MemOperand(r9, kLevelOffset));
add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, r0);
+ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, function);
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(this, r3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, r0);
+ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
+ Label return_value_loaded;
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- cmp(r0, Operand(0));
- LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- ldr(r0, MemOperand(r0), ne);
-
+ // Load the value from ReturnValue.
+ ldr(r0, return_value_operand);
+ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
+ str(r4, MemOperand(r9, kNextOffset));
if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
+ ldr(r1, MemOperand(r9, kLevelOffset));
cmp(r1, r6);
- Check(eq, "Unexpected level after return from api call");
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+ ldr(ip, MemOperand(r9, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
@@ -2199,24 +2442,32 @@
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
+ bind(&exception_handled);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ ldr(cp, *context_restore_operand);
+ }
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
+ LeaveExitFrame(false, r4, !restore_context);
mov(pc, lr);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
+ str(r5, MemOperand(r9, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(1, r5);
- mov(r0, Operand(ExternalReference::isolate_address()));
+ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
CallCFunction(
ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
@@ -2225,16 +2476,7 @@
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(sp, sp, Operand(num_arguments * kPointerSize));
- }
- LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2243,333 +2485,162 @@
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- mov(index, Operand(hash, LSL, kSmiTagSize));
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}
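DecodeFieldToSmi replaces the open-coded Ubfx/shift pair; roughly this
computation, with the field position and width passed in since the exact
BitField names are version-dependent:

  #include <cstdint>

  // Extract the cached array index from a string hash field and re-tag it
  // as a smi (kSmiTag == 0, kSmiTagSize == 1).
  int32_t IndexFromHashReference(uint32_t hash_field, int shift, int bits) {
    uint32_t index = (hash_field >> shift) & ((1u << bits) - 1);
    return static_cast<int32_t>(index << 1);
  }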
-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt_f64_s32(d7, s15);
- vmov(outLowReg, outHighReg, d7);
-}
-
-
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
- DwVfpRegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, ¬_smi);
- // Remove smi tag and convert to double.
- mov(scratch1, Operand(object, ASR, kSmiTagSize));
- vmov(scratch3, scratch1);
- vcvt_f64_s32(result, scratch3);
- b(&done);
- bind(¬_smi);
- }
- // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- sub(scratch2, object, Operand(kHeapObjectTag));
- cmp(scratch1, heap_number_map);
- b(ne, not_number);
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- Sbfx(scratch1,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // All-one value sign extend to -1.
- cmp(scratch1, Operand(-1));
- b(eq, not_number);
- }
- vldr(result, scratch2, HeapNumber::kValueOffset);
- bind(&done);
-}
-
-
-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2) {
- mov(scratch1, Operand(smi, ASR, kSmiTagSize));
- vmov(scratch2, scratch1);
- vcvt_f64_s32(value, scratch2);
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32) {
+void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- sub(scratch, source, Operand(kHeapObjectTag));
- vldr(double_scratch, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(double_scratch.low(), double_scratch);
- vmov(dest, double_scratch.low());
- // Signed vcvt instruction will saturate to the minimum (0x80000000) or
- // maximun (0x7fffffff) signed 32bits integer when the double is out of
- // range. When substracting one, the minimum signed integer becomes the
- // maximun signed integer.
- sub(scratch, dest, Operand(1));
- cmp(scratch, Operand(LONG_MAX - 1));
- // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
- b(ge, not_int32);
+ vmov(value.low(), smi);
+ vcvt_f64_s32(value, 1);
} else {
- // This code is faster for doubles that are in the ranges -0x7fffffff to
- // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
- // the range of signed int32 values that are not Smis. Jumps to the label
- // 'not_int32' if the double isn't in the range -0x80000000.0 to
- // 0x80000000.0 (excluding the endpoints).
- Label right_exponent, done;
- // Get exponent word.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, Operand(0, RelocInfo::NONE));
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- sub(scratch2, scratch2, Operand(fudge_factor));
- cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- b(gt, not_int32);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- b(lt, &done);
-
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- rsb(dest, scratch2, Operand(30));
-
- bind(&right_exponent);
- // Get the top bits of the mantissa.
- and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- bind(&done);
+ SmiUntag(ip, smi);
+ vmov(value.low(), ip);
+ vcvt_f64_s32(value, value.low());
}
}
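The VFP3 branch of SmiToDouble never untags: vcvt_f64_s32(value, 1) converts
with one fixed-point fraction bit, so the x2 of the smi tag is divided away by
the conversion itself. Scalar equivalent:

  #include <cstdint>

  // A smi stores the integer shifted left by one; halving during the
  // int-to-double conversion recovers the value exactly.
  double SmiToDoubleReference(int32_t tagged_smi) {
    return static_cast<double>(tagged_smi) * 0.5;
  }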
-void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
- CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
- Register prev_fpscr = scratch1;
- Register scratch = scratch2;
-
- int32_t check_inexact_conversion =
- (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
-
- // Set custom FPCSR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- vmrs(prev_fpscr);
- bic(scratch,
- prev_fpscr,
- Operand(kVFPExceptionMask |
- check_inexact_conversion |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
- if (rounding_mode != kRoundToNearest) {
- orr(scratch, scratch, Operand(rounding_mode));
- }
- vmsr(scratch);
-
- // Convert the argument to an integer.
- vcvt_s32_f64(result,
- double_input,
- (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
- : kFPSCRRounding);
-
- // Retrieve FPSCR.
- vmrs(scratch);
- // Restore FPSCR.
- vmsr(prev_fpscr);
- // Check for vfp exceptions.
- tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch) {
+ DCHECK(!double_input.is(double_scratch));
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
}
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
+void MacroAssembler::TryDoubleToInt32Exact(Register result,
+ DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch) {
+ DCHECK(!double_input.is(double_scratch));
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
+}
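Both helpers use the same round-trip idea; a sketch of the predicate (note that
plain C++ leaves the out-of-range cast undefined, whereas ARM's vcvt saturates):

  #include <cstdint>

  // A double is an exact int32 iff double(int32(d)) compares equal to d.
  bool DoubleIsInt32(double d, int32_t* result) {
    int32_t i = static_cast<int32_t>(d);  // vcvt_s32_f64, saturating on ARM
    *result = i;
    return static_cast<double>(i) == d;   // VFPCompareAndSetFlags
  }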
- // Extract the biased exponent in result.
- Ubfx(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Check for Infinity and NaNs, which should return 0.
- cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand(0), LeaveCC, eq);
- b(eq, &done);
+void MacroAssembler::TryInt32Floor(Register result,
+ DwVfpRegister double_input,
+ Register input_high,
+ LowDwVfpRegister double_scratch,
+ Label* done,
+ Label* exact) {
+ DCHECK(!result.is(input_high));
+ DCHECK(!double_input.is(double_scratch));
+ Label negative, exception;
- // Express exponent as delta to (number of mantissa bits + 31).
- sub(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
- SetCC);
+ VmovHigh(input_high, double_input);
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- b(le, &normal_exponent);
- mov(result, Operand(0));
- b(&done);
+ // Test for NaN and infinities.
+ Sbfx(result, input_high,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ cmp(result, Operand(-1));
+ b(eq, &exception);
+ // Test for values that can be exactly represented as a
+ // signed 32-bit integer.
+ TryDoubleToInt32Exact(result, double_input, double_scratch);
+ // If exact, return (result already fetched).
+ b(eq, exact);
+ cmp(input_high, Operand::Zero());
+ b(mi, &negative);
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
+ // Input is in ]+0, +inf[.
+ // If result equals 0x7fffffff, the input was out of range or in
+ // ]0x7fffffff, 0x80000000[. We ignore this last case, which could fit
+ // into an int32; that means we always treat such input as out of range
+ // and always go to the exception path.
+ // If result < 0x7fffffff, go to done with the result already fetched.
+ cmn(result, Operand(1));
+ b(mi, &exception);
+ b(done);
- // Save the sign.
- Register sign = result;
- result = no_reg;
- and_(sign, input_high, Operand(HeapNumber::kSignMask));
+ // Input is in ]-inf, -0[.
+ // If x is a non-integer negative number,
+ // floor(x) <=> round_to_zero(x) - 1.
+ bind(&negative);
+ sub(result, result, Operand(1), SetCC);
+ // If result is still negative, go to done with the result already fetched.
+ // Else, we had an overflow and we fall through to the exception path.
+ b(mi, done);
+ bind(&exception);
+}
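The floor strategy in scalar form; a sketch only, since the real code reports
failure by falling through to the exception label, and NaN/infinity are
filtered up front by the exponent test rather than handled here:

  #include <cstdint>

  // floor(x) == trunc(x) for positive x and exact integers; for negative
  // non-integers it is trunc(x) - 1.
  bool TryInt32Floor(double x, int32_t* out) {
    int32_t t = static_cast<int32_t>(x);  // round toward zero (saturating)
    if (static_cast<double>(t) == x) { *out = t; return true; }  // exact
    if (x > 0.0) {
      if (t == INT32_MAX) return false;   // saturated: treat as out of range
      *out = t;
      return true;
    }
    if (t == INT32_MIN) return false;     // t - 1 would overflow
    *out = t - 1;                         // negative non-integer
    return true;
  }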
- // Set the implicit 1 before the mantissa part in input_high.
- orr(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- mov(input_high, Operand(input_high, LSL, scratch));
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister double_input,
+ Label* done) {
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- rsb(scratch, scratch, Operand(32), SetCC);
- b(&pos_shift, ge);
+ // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ sub(ip, result, Operand(1));
+ cmp(ip, Operand(0x7ffffffe));
+ b(lt, done);
+}
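The two-instruction saturation test works because vcvt_s32_f64 saturates to
exactly 0x7fffffff or 0x80000000; subtracting 1 with ARM's wraparound maps
those two values, and only those, to signed values >= 0x7ffffffe. Scalar
sketch (unsigned arithmetic stands in for the wraparound, since signed
overflow is undefined in C++):

  #include <cstdint>

  // True iff `result` is neither saturation value.
  bool ConversionDidNotSaturate(int32_t result) {
    int32_t r = static_cast<int32_t>(static_cast<uint32_t>(result) - 1u);
    return r < 0x7ffffffe;  // signed compare, matching cmp + b(lt)
  }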
- // Negate scratch.
- rsb(scratch, scratch, Operand(0));
- mov(input_low, Operand(input_low, LSL, scratch));
- b(&shift_done);
- bind(&pos_shift);
- mov(input_low, Operand(input_low, LSR, scratch));
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DwVfpRegister double_input) {
+ Label done;
- bind(&shift_done);
- orr(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- cmp(sign, Operand(0));
- result = sign;
- sign = no_reg;
- rsb(result, input_high, Operand(0), LeaveCC, ne);
- mov(result, input_high, LeaveCC, eq);
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub.
+ push(lr);
+ sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ vstr(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+ CallStub(&stub);
+
+ add(sp, sp, Operand(kDoubleSize));
+ pop(lr);
+
bind(&done);
}
-void MacroAssembler::EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- SwVfpRegister single_scratch,
- Register scratch,
- Register input_high,
- Register input_low) {
- CpuFeatures::Scope scope(VFP3);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
- ASSERT(!single_scratch.is(double_input.low()) &&
- !single_scratch.is(double_input.high()));
-
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
Label done;
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ DCHECK(!result.is(object));
- // Clear cumulative exception flags.
- ClearFPSCRBits(kVFPExceptionMask, scratch);
- // Try a conversion to a signed integer.
- vcvt_s32_f64(single_scratch, double_input);
- vmov(result, single_scratch);
- // Retrieve he FPSCR.
- vmrs(scratch);
- // Check for overflow and NaNs.
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- // If we had no exceptions we are done.
- b(eq, &done);
+ vldr(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
- // Load the double value and perform a manual truncation.
- vmov(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
+ // If we fell through, the inline version didn't succeed; call the stub.
+ push(lr);
+ DoubleToIStub stub(isolate(),
+ object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(lr);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_number) {
+ Label done;
+ DCHECK(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ TruncateHeapNumberToI(result, object);
+
bind(&done);
}
@@ -2577,10 +2648,10 @@
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
- mov(dst, Operand(src, ASR, kSmiTagSize));
+ SmiUntag(dst, src);
and_(dst, dst, Operand((1 << num_least_bits) - 1));
}
}
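Both branches compute the same value; ubfx merely fuses the untag shift with
the mask. In scalar form (after masking, arithmetic versus logical shift makes
no difference for num_least_bits <= 30):

  #include <cstdint>

  uint32_t LeastBitsFromSmi(int32_t tagged_smi, int num_least_bits) {
    return (static_cast<uint32_t>(tagged_smi) >> 1) &
           ((1u << num_least_bits) - 1);
  }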
@@ -2594,16 +2665,14 @@
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -2611,21 +2680,7 @@
// smarter.
mov(r0, Operand(num_arguments));
mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub);
}
@@ -2635,7 +2690,7 @@
mov(r0, Operand(num_arguments));
mov(r1, Operand(ext));
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -2664,10 +2719,10 @@
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+ DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2676,17 +2731,15 @@
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
- SetCallKind(r5, CALL_AS_METHOD);
Call(r2);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, CALL_AS_METHOD);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(r2);
}
}
@@ -2695,7 +2748,8 @@
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(target,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(target, FieldMemOperand(target,
@@ -2704,7 +2758,7 @@
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(r1));
+ DCHECK(!target.is(r1));
GetBuiltinFunction(r1, id);
// Load the code entry point from the builtins object.
ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
@@ -2723,7 +2777,7 @@
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
ldr(scratch1, MemOperand(scratch2));
@@ -2735,7 +2789,7 @@
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
ldr(scratch1, MemOperand(scratch2));
@@ -2745,25 +2799,15 @@
}
-void MacroAssembler::Assert(Condition cond, const char* msg) {
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code())
- Check(cond, msg);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(ip, index);
- cmp(reg, ip);
- Check(eq, "Register did not match expected root");
- }
+ Check(cond, reason);
}
void MacroAssembler::AssertFastElements(Register elements) {
if (emit_debug_code()) {
- ASSERT(!elements.is(ip));
+ DCHECK(!elements.is(ip));
Label ok;
push(elements);
ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -2776,61 +2820,58 @@
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cond, const char* msg) {
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
Label L;
b(cond, &L);
- Abort(msg);
+ Abort(reason);
// will not return here
bind(&L);
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
- mov(r0, Operand(p0));
+ mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
- push(r0);
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
+ static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ DCHECK(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
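
// A minimal sketch of the 32-bit smi encoding relied on above (Abort now
// passes the BailoutReason enum as a smi in r0 rather than a raw, possibly
// unaligned string pointer, which is why the old two-push sequence collapses
// to one and Runtime::kAbort takes a single argument). Assumes the usual
// convention kSmiTag == 0 and kSmiTagSize == 1; any value that fits in
// 31 bits survives the round trip untouched.
#include <cassert>
#include <cstdint>

int32_t SmiFromInt(int32_t value) {
  assert(value >= -(1 << 30) && value < (1 << 30));  // must fit in 31 bits
  return value * 2;  // payload in the upper bits, tag bit 0 == kSmiTag
}

int32_t SmiToInt(int32_t smi) {
  return smi >> 1;  // arithmetic shift drops the tag
}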
@@ -2861,47 +2902,35 @@
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ ldr(scratch,
+ MemOperand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ ldr(ip, FieldMemOperand(scratch, offset));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ ldr(map_in_out, FieldMemOperand(scratch, offset));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
ldr(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
ldr(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -2916,7 +2945,7 @@
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
b(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -2958,7 +2987,7 @@
void MacroAssembler::UntagAndJumpIfSmi(
Register dst, Register src, Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ SmiUntag(dst, src, SetCC);
b(cc, smi_case); // Shifter carry is not set for a smi.
}
@@ -2966,7 +2995,7 @@
void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ SmiUntag(dst, src, SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}
@@ -2981,38 +3010,72 @@
}
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmi);
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(eq, "Operand is not smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(eq, kOperandIsNotSmi);
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(lo, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAString);
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(lo, kOperandIsNotAString);
+ }
}
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAName);
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, LAST_NAME_TYPE);
+ pop(object);
+ Check(le, kOperandIsNotAName);
+ }
+}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- CompareRoot(src, root_value_index);
- Assert(eq, message);
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ b(eq, &done_checking);
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, kHeapNumberMapRegisterClobbered);
+ }
}
@@ -3021,46 +3084,131 @@
Register scratch,
Label* on_not_heap_number) {
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
cmp(scratch, heap_number_map);
b(ne, on_not_heap_number);
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register usage: result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ eor(scratch1, scratch1, Operand(scratch2));
+ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ sub(probe, probe, Operand(kHeapObjectTag));
+ vldr(d1, probe, HeapNumber::kValueOffset);
+ VFPCompareAndSetFlags(d0, d1);
+ b(ne, not_found); // The cache did not contain this value.
+ b(&load_result_from_cache);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ cmp(object, probe);
+ b(ne, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
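
// Scalar model of the hash computed above, assuming the same scheme as
// Heap::GetNumberStringCache: smis hash to their value, doubles to the xor
// of their two 32-bit halves; mask is (cache length / 2) - 1 as built above.
#include <cstdint>
#include <cstring>

uint32_t NumberCacheHash(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // the ldm of both words
  return (static_cast<uint32_t>(bits) ^
          static_cast<uint32_t>(bits >> 32)) & mask;  // eor + and_
}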
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ b(eq, &succeed);
+ cmp(reg, Operand(SYMBOL_TYPE));
+ b(ne, not_unique_name);
+
+ bind(&succeed);
}
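
// The branch structure above encodes this predicate; the mask values are
// left as parameters since their concrete bit patterns live in V8's headers.
#include <cstdint>

bool IsUniqueName(uint32_t instance_type, uint32_t is_not_string_mask,
                  uint32_t is_not_internalized_mask, uint32_t symbol_type) {
  // Internalized strings have both the "not a string" and "not internalized"
  // bits clear (both tags are zero); symbols match by exact type code.
  if ((instance_type & (is_not_string_mask | is_not_internalized_mask)) == 0)
    return true;
  return instance_type == symbol_type;
}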
@@ -3070,19 +3218,25 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required) {
+ Label* gc_required,
+ TaggingMode tagging_mode,
+ MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+ AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
@@ -3101,27 +3255,23 @@
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
- RegList temps,
+ LowDwVfpRegister double_scratch,
int field_count) {
- // At least one bit set in the first 15 registers.
- ASSERT((temps & ((1 << 15) - 1)) != 0);
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
+ int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
+ for (int i = 0; i < double_count; i++) {
+ vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
+ vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
}
- ASSERT(!tmp.is(no_reg));
- for (int i = 0; i < field_count; i++) {
- ldr(tmp, FieldMemOperand(src, i * kPointerSize));
- str(tmp, FieldMemOperand(dst, i * kPointerSize));
+ STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
+ STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
+
+ int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
+ if (remain != 0) {
+ vldr(double_scratch.low(),
+ FieldMemOperand(src, (field_count - 1) * kPointerSize));
+ vstr(double_scratch.low(),
+ FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
}
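
// Scalar model of the copy schedule above, assuming ARM32 sizes
// (kPointerSize == 4, DwVfpRegister::kSizeInBytes == 8): pairs of fields
// move as one 8-byte vldr/vstr transfer, and an odd trailing field moves
// through the low 4-byte half of the scratch register.
#include <cstdint>
#include <cstring>

void CopyFieldsModel(uint32_t* dst, const uint32_t* src, int field_count) {
  int double_count = field_count / 2;
  for (int i = 0; i < double_count; i++) {
    std::memcpy(dst + 2 * i, src + 2 * i, 8);  // vldr/vstr of a d-register
  }
  if (field_count % 2 != 0) {
    dst[field_count - 1] = src[field_count - 1];  // single-word tail
  }
}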
@@ -3130,46 +3280,45 @@
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand(0));
- b(eq, &done);
+ cmp(length, Operand(kPointerSize));
+ b(le, &byte_loop);
+
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
+ b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
tst(src, Operand(kPointerSize - 1));
- Assert(eq, "Expecting alignment for CopyBytes");
+ Assert(eq, kExpectingAlignmentForCopyBytes);
}
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
-#if CAN_USE_UNALIGNED_ACCESSES
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
-#else
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
-#endif
+ if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+ } else {
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ }
sub(length, length, Operand(kPointerSize));
b(&word_loop);
// Copy the last bytes if any left.
bind(&byte_loop);
- cmp(length, Operand(0));
+ cmp(length, Operand::Zero());
b(eq, &done);
bind(&byte_loop_1);
ldrb(scratch, MemOperand(src, 1, PostIndex));
@@ -3193,70 +3342,54 @@
}
-void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
- Register source, // Input.
- Register scratch) {
- ASSERT(!zeros.is(source) || !source.is(scratch));
- ASSERT(!zeros.is(scratch));
- ASSERT(!scratch.is(ip));
- ASSERT(!source.is(ip));
- ASSERT(!zeros.is(ip));
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- clz(zeros, source); // This instruction is only supported after ARM5.
-#else
- // Order of the next two lines is important: zeros register
- // can be the same as source register.
- Move(scratch, source);
- mov(zeros, Operand(0, RelocInfo::NONE));
- // Top 16.
- tst(scratch, Operand(0xffff0000));
- add(zeros, zeros, Operand(16), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
- // Top 8.
- tst(scratch, Operand(0xff000000));
- add(zeros, zeros, Operand(8), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
- // Top 4.
- tst(scratch, Operand(0xf0000000));
- add(zeros, zeros, Operand(4), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
- // Top 2.
- tst(scratch, Operand(0xc0000000));
- add(zeros, zeros, Operand(2), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
- // Top bit.
- tst(scratch, Operand(0x80000000u));
- add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
+void MacroAssembler::CheckFor32DRegs(Register scratch) {
+ mov(scratch, Operand(ExternalReference::cpu_features()));
+ ldr(scratch, MemOperand(scratch));
+ tst(scratch, Operand(1u << VFP32DREGS));
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+ CheckFor32DRegs(scratch);
+ vstm(db_w, location, d16, d31, ne);
+ sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+ vstm(db_w, location, d0, d15);
+}
+
+
+void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+ CheckFor32DRegs(scratch);
+ vldm(ia_w, location, d0, d15);
+ vldm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
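
// Frame-layout note in code form: CheckFor32DRegs leaves ne set when
// d16..d31 exist, and on cores without them the sub/add reserves the same
// 16 doubles of space, so SaveFPRegs and RestoreFPRegs always agree on
// offsets. A sketch assuming kDoubleSize == 8.
#include <cstddef>

size_t FPRegsSaveAreaBytes() {
  const size_t kDoubleSize = 8;  // VFP double width (assumption made explicit)
  return 32 * kDoubleSize;       // d0..d15 plus either d16..d31 or padding
}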
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch1, first, Operand(kFlatAsciiStringMask));
- and_(scratch2, second, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ and_(scratch1, first, Operand(kFlatOneByteStringMask));
+ and_(scratch2, second, Operand(kFlatOneByteStringMask));
+ cmp(scratch1, Operand(kFlatOneByteStringTag));
// Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+ cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
b(ne, failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch, type, Operand(kFlatAsciiStringMask));
- cmp(scratch, Operand(kFlatAsciiStringTag));
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ and_(scratch, type, Operand(kFlatOneByteStringMask));
+ cmp(scratch, Operand(kFlatOneByteStringTag));
b(ne, failure);
}
@@ -3269,9 +3402,9 @@
if (use_eabi_hardfloat()) {
// In the hard floating point calling convention, we can use
// all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ if (num_double_arguments > DoubleRegister::NumRegisters()) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ 2 * (num_double_arguments - DoubleRegister::NumRegisters());
}
} else {
// In the soft floating point calling convention, every double
@@ -3286,6 +3419,42 @@
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string);
+ Check(ne, kNonObject);
+
+ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ cmp(ip, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType);
+
+ // The index is assumed to come in untagged. Tag it to compare with the
+ // string length without using a temp register; it is restored at the end
+ // of this function.
+ Label index_tag_ok, index_tag_bad;
+ TrySmiTag(index, index, &index_tag_bad);
+ b(&index_tag_ok);
+ bind(&index_tag_bad);
+ Abort(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+
+ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ cmp(index, ip);
+ Check(lt, kIndexIsTooLarge);
+
+ cmp(index, Operand(Smi::FromInt(0)));
+ Check(ge, kIndexIsNegative);
+
+ SmiUntag(index, index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -3297,7 +3466,7 @@
// and the original value of sp.
mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -3312,41 +3481,27 @@
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- } else {
- vmov(r0, r1, dreg);
+void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
+ DCHECK(src.is(d0));
+ if (!use_eabi_hardfloat()) {
+ vmov(r0, r1, src);
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
- if (use_eabi_hardfloat()) {
- if (dreg2.is(d0)) {
- ASSERT(!dreg1.is(d1));
- Move(d1, dreg2);
- Move(d0, dreg1);
- } else {
- Move(d0, dreg1);
- Move(d1, dreg2);
- }
- } else {
- vmov(r0, r1, dreg1);
- vmov(r2, r3, dreg2);
- }
+// On ARM this is just a synonym to make the purpose clear.
+void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
+ MovToFloatParameter(src);
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
- Register reg) {
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- Move(r0, reg);
- } else {
- Move(r2, reg);
- vmov(r0, r1, dreg);
+void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
+ DwVfpRegister src2) {
+ DCHECK(src1.is(d0));
+ DCHECK(src2.is(d1));
+ if (!use_eabi_hardfloat()) {
+ vmov(r0, r1, src1);
+ vmov(r2, r3, src2);
}
}
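
// Under the soft-float EABI a double parameter travels in a core-register
// pair (r0/r1 for the first, r2/r3 for the second), which is what the vmov
// calls above implement; a scalar model of the split on little-endian ARM:
#include <cstdint>
#include <cstring>

void SplitDoubleForSoftFloat(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);        // low word -> r0 (or r2)
  *hi = static_cast<uint32_t>(bits >> 32);  // high word -> r1 (or r3)
}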
@@ -3381,16 +3536,16 @@
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
tst(sp, Operand(frame_alignment_mask));
b(eq, &alignment_as_expected);
@@ -3417,22 +3572,71 @@
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
+ Register result,
+ Register scratch) {
+ Label small_constant_pool_load, load_result;
ldr(result, MemOperand(ldr_location));
+
+ if (FLAG_enable_ool_constant_pool) {
+ // Check if this is an extended constant pool load.
+ and_(scratch, result, Operand(GetConsantPoolLoadMask()));
+ teq(scratch, Operand(GetConsantPoolLoadPattern()));
+ b(eq, &small_constant_pool_load);
+ if (emit_debug_code()) {
+ // Check that the instruction sequence is:
+ // movw reg, #offset_low
+ // movt reg, #offset_high
+ // ldr reg, [pp, reg]
+ Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
+ GetLdrPpRegOffsetPattern()};
+ for (int i = 0; i < 3; i++) {
+ ldr(result, MemOperand(ldr_location, i * kInstrSize));
+ and_(result, result, Operand(patterns[i]));
+ cmp(result, Operand(patterns[i]));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+ }
+ // Result was clobbered. Restore it.
+ ldr(result, MemOperand(ldr_location));
+ }
+
+ // Get the offset into the constant pool. First extract movw immediate into
+ // result.
+ and_(scratch, result, Operand(0xfff));
+ mov(ip, Operand(result, LSR, 4));
+ and_(ip, ip, Operand(0xf000));
+ orr(result, scratch, Operand(ip));
+ // Then extract movt immediate and or into result.
+ ldr(scratch, MemOperand(ldr_location, kInstrSize));
+ and_(ip, scratch, Operand(0xf0000));
+ orr(result, result, Operand(ip, LSL, 12));
+ and_(scratch, scratch, Operand(0xfff));
+ orr(result, result, Operand(scratch, LSL, 16));
+
+ b(&load_result);
+ }
+
+ bind(&small_constant_pool_load);
if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, "The instruction to patch should be a load from pc.");
+ // Check that the instruction is a ldr reg, [<pc or pp> + offset].
+ and_(result, result, Operand(GetConsantPoolLoadPattern()));
+ cmp(result, Operand(GetConsantPoolLoadPattern()));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
- // Get the address of the constant.
+
+ // Get the offset into the constant pool.
+ const uint32_t kLdrOffsetMask = (1 << 12) - 1;
and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
+
+ bind(&load_result);
+ // Get the address of the constant.
+ if (FLAG_enable_ool_constant_pool) {
+ add(result, pp, Operand(result));
+ } else {
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(Instruction::kPCReadOffset));
+ }
}
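
// The shift/mask dance above extracts the 16-bit immediates of a movw/movt
// pair; ARM encodes each as imm4:imm12 with imm4 in instruction bits 16..19
// and imm12 in bits 0..11. A direct model of that decoding:
#include <cstdint>

uint32_t DecodeMovImm16(uint32_t instr) {
  return (instr & 0xfff) | ((instr >> 4) & 0xf000);  // imm12 | (imm4 << 12)
}

uint32_t DecodeExtendedPoolOffset(uint32_t movw_instr, uint32_t movt_instr) {
  return DecodeMovImm16(movw_instr) |         // offset_low
         (DecodeMovImm16(movt_instr) << 16);  // offset_high
}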
@@ -3442,19 +3646,31 @@
int mask,
Condition cc,
Label* condition_met) {
- and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+ Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, Operand(map));
+ ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ tst(scratch, Operand(Map::Deprecated::kMask));
+ b(ne, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
@@ -3464,7 +3680,7 @@
Label* has_color,
int first_bit,
int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
GetMarkBits(object, bitmap_scratch, mask_scratch);
@@ -3497,8 +3713,8 @@
ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
b(eq, &is_data_object);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -3511,7 +3727,7 @@
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
@@ -3528,14 +3744,14 @@
Register mask_scratch,
Register load_scratch,
Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
@@ -3568,8 +3784,8 @@
b(eq, &is_data_object);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
@@ -3581,18 +3797,18 @@
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
tst(instance_type, Operand(kExternalStringTag));
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object);
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
@@ -3621,125 +3837,213 @@
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
- Label above_zero;
+ DwVfpRegister input_reg,
+ LowDwVfpRegister double_scratch) {
Label done;
- Label in_bounds;
- Vmov(temp_double_reg, 0.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
- b(gt, &above_zero);
-
- // Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, Operand(0));
- b(al, &done);
-
- // Double value is >= 255, return 255.
- bind(&above_zero);
- Vmov(temp_double_reg, 255.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
- b(le, &in_bounds);
+ // Handle inputs >= 255 (including +infinity).
+ Vmov(double_scratch, 255.0, result_reg);
mov(result_reg, Operand(255));
- b(al, &done);
+ VFPCompareAndSetFlags(input_reg, double_scratch);
+ b(ge, &done);
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
- Vmov(temp_double_reg, 0.5);
- vadd(temp_double_reg, input_reg, temp_double_reg);
- vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
- vmov(result_reg, temp_double_reg.low());
+ // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
+ // rounding mode will provide the correct result.
+ vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, double_scratch.low());
+
bind(&done);
}
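
// Scalar model of the clamp above: values >= 255 (including +infinity) take
// the early exit; everything else, including NaN and negatives, goes through
// the saturating unsigned conversion, which yields 0 for those inputs and
// rounds to nearest for the rest. Assumes the default FP environment.
#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8Model(double value) {
  if (value >= 255.0) return 255;                  // also catches +inf
  if (!(value > 0.0)) return 0;                    // negatives, -inf, NaN
  return static_cast<uint8_t>(std::lrint(value));  // round to nearest (even)
}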
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- ldr(descriptors,
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, ¬_smi);
- mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
- bind(¬_smi);
+ ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
}
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
- // Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r7;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- mov(r1, r0);
+ Label next, start;
+ mov(r2, r0);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+
+ EnumLength(r3, r1);
+ cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ b(eq, call_runtime);
+
+ jmp(&start);
+
bind(&next);
-
- // Check that there are no elements. Register r1 contains the
- // current JS object we've reached through the prototype chain.
- ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- cmp(r2, empty_fixed_array_value);
- b(ne, call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r2 for the subsequent
- // prototype load.
- ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(r3, call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (r3). This is the case if the next enumeration
- // index field does not contain a smi.
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(r3, call_runtime);
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmp(r1, r0);
- b(eq, &check_prototype);
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmp(r3, empty_fixed_array_value);
+ EnumLength(r3, r1);
+ cmp(r3, Operand(Smi::FromInt(0)));
b(ne, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
- cmp(r1, null_value);
+ bind(&start);
+
+ // Check that there are no elements. Register r2 contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
+ cmp(r2, empty_fixed_array_value);
+ b(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
+ b(ne, call_runtime);
+
+ bind(&no_elements);
+ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+ cmp(r2, null_value);
b(ne, &next);
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ add(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Operand(new_space_start));
+ b(lt, no_memento_found);
+ mov(ip, Operand(new_space_allocation_top));
+ ldr(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
+ b(gt, no_memento_found);
+ ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ cmp(scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
}
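
// Address-window model of the test above: the candidate AllocationMemento
// sits immediately after the JSArray, and it can only be live if its end
// lies inside the currently allocated slice of new space.
#include <cstdint>

bool MayHaveMemento(uintptr_t memento_end,  // receiver + JSArray + memento
                    uintptr_t new_space_start, uintptr_t new_space_top) {
  return memento_end >= new_space_start && memento_end <= new_space_top;
}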
-CodePatcher::CodePatcher(byte* address, int instructions)
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ DCHECK(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Start the walk from the object itself.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
+ b(eq, found);
+ ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ cmp(current, Operand(factory->null_value()));
+ b(ne, &loop_again);
+}
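
// Pointer-chasing model of the loop above, with hypothetical Map/Obj structs
// standing in for the tagged heap loads:
struct Map;
struct Obj { const Map* map; };
struct Map { int elements_kind; const Obj* prototype; };

bool DictionaryInPrototypeChain(const Obj* o, int dictionary_elements_kind) {
  for (; o != nullptr; o = o->map->prototype) {  // null_value ends the chain
    if (o->map->elements_kind == dictionary_elements_kind) return true;
  }
  return false;
}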
+
+
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6,
+ Register reg7,
+ Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
+}
+#endif
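
// The aliasing test works because RegList is a bitmask: n valid registers
// collapse to fewer than n set bits exactly when two share a register code.
// An equivalent standalone check (GCC/Clang builtin assumed for popcount):
#include <cstdint>

bool AnyAliased(const int* reg_codes, int n) {  // code < 0 means no_reg
  uint32_t regs = 0;
  int valid = 0;
  for (int i = 0; i < n; i++) {
    if (reg_codes[i] < 0) continue;
    valid++;
    regs |= 1u << reg_codes[i];
  }
  return valid != __builtin_popcount(regs);
}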
+
+
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
- instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CpuFeatures::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
@@ -3760,6 +4064,28 @@
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ DCHECK(!dividend.is(result));
+ DCHECK(!dividend.is(ip));
+ DCHECK(!result.is(ip));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ mov(ip, Operand(mag.multiplier));
+ smull(ip, result, dividend, ip);
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
+ add(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
+ sub(result, result, Operand(dividend));
+ }
+ if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
+ add(result, result, Operand(dividend, LSR, 31));
+}
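
// Scalar model of the emitted sequence (smull high word, the two sign
// fixups, the arithmetic shift, and the final add of the dividend's sign
// bit), taking the (multiplier, shift) pair as given since it comes from
// base::SignedDivisionByConstant. For a valid magic pair this equals
// dividend / divisor with truncation toward zero.
#include <cstdint>

int32_t TruncatingDivModel(int32_t dividend, int32_t divisor,
                           uint32_t multiplier, int shift) {
  int64_t product = static_cast<int64_t>(dividend) *
                    static_cast<int32_t>(multiplier);    // smull
  int32_t result = static_cast<int32_t>(product >> 32);  // high word
  bool neg = (multiplier & 0x80000000u) != 0;
  if (divisor > 0 && neg) result += dividend;
  if (divisor < 0 && !neg && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;                       // ASR
  result += static_cast<uint32_t>(dividend) >> 31;       // add sign bit
  return result;
}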
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 47afa93..d2a1786 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,36 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/bailout-reason.h"
+#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -44,50 +22,54 @@
}
-inline Operand SmiUntagOperand(Register object) {
- return Operand(object, ASR, kSmiTagSize);
-}
-
-
-
// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
+const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
+const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
+const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
-
-
-// Flags used for the ObjectToDoubleVFPRegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag
+ DONT_TAG_RESULT
};
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg,
+ Register reg7 = no_reg,
+ Register reg8 = no_reg);
+#endif
+
+
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -97,24 +79,37 @@
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+ // Returns the size of a call in instructions. Note that the value returned
+ // is only valid as long as no entries are added to the constant pool between
+ // checking the call size and emitting the actual call.
+ static int CallSize(Register target, Condition cond = al);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallStubSize(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Isolate* isolate,
+ Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- static int CallSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode,
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- Condition cond = al);
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -130,7 +125,8 @@
Register scratch = no_reg,
Condition cond = al);
-
+ void Mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void Ubfx(Register dst, Register src, int lsb, int width,
@@ -146,16 +142,27 @@
int lsb,
int width,
Condition cond = al);
- void Bfc(Register dst, int lsb, int width, Condition cond = al);
+ void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
void Usat(Register dst, int satpos, const Operand& src,
Condition cond = al);
void Call(Label* target);
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
- void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
+ Condition cond = al) {
+ if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
+ mov(dst, src, sbit, cond);
+ }
+ }
+ void Move(DwVfpRegister dst, DwVfpRegister src);
+
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
// Load an object from the root table.
void LoadRoot(Register destination,
@@ -166,16 +173,6 @@
Heap::RootListIndex index,
Condition cond = al);
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -203,6 +200,10 @@
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -262,7 +263,9 @@
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
@@ -274,7 +277,9 @@
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
@@ -282,9 +287,17 @@
lr_status,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp);
+
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
@@ -295,14 +308,17 @@
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// Push a handle.
void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
+ DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
} else {
@@ -313,9 +329,9 @@
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
+ DCHECK(!src1.is(src2));
+ DCHECK(!src2.is(src3));
+ DCHECK(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -335,12 +351,12 @@
Register src3,
Register src4,
Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
+ DCHECK(!src1.is(src2));
+ DCHECK(!src2.is(src3));
+ DCHECK(!src1.is(src3));
+ DCHECK(!src1.is(src4));
+ DCHECK(!src2.is(src4));
+ DCHECK(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -364,7 +380,7 @@
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
+ DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
} else {
@@ -375,9 +391,9 @@
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
+ DCHECK(!src1.is(src2));
+ DCHECK(!src2.is(src3));
+ DCHECK(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -387,7 +403,7 @@
}
} else {
Pop(src2, src3, cond);
- str(src1, MemOperand(sp, 4, PostIndex), cond);
+ ldr(src1, MemOperand(sp, 4, PostIndex), cond);
}
}
@@ -397,12 +413,12 @@
Register src3,
Register src4,
Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
+ DCHECK(!src1.is(src2));
+ DCHECK(!src2.is(src3));
+ DCHECK(!src1.is(src3));
+ DCHECK(!src1.is(src4));
+ DCHECK(!src2.is(src4));
+ DCHECK(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -424,16 +440,19 @@
}
}
+ // Push a fixed frame, consisting of lr, fp, constant pool (if
+ // FLAG_enable_ool_constant_pool), context and JS function / marker id if
+ // marker_reg is a valid register.
+ void PushFixedFrame(Register marker_reg = no_reg);
+ void PopFixedFrame(Register marker_reg = no_reg);
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
- void PushSafepointRegistersAndDoubles();
- void PopSafepointRegistersAndDoubles();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
- void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
@@ -450,10 +469,23 @@
const MemOperand& dst,
Condition cond = al);
- // Clear specified FPSCR bits.
- void ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond = al);
+ // Ensure that FPSCR contains values needed by JavaScript.
+ // We need the NaNModeControlBit to be sure that operations like
+ // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
+ // In VFP3 it will always be the Canonical NaN.
+ // In VFP2 it will be either the Canonical NaN or the negative version
+ // of the Canonical NaN. Having two possible values does not matter; the
+ // aim is to never generate the hole NaN.
+ void VFPEnsureFPSCRState(Register scratch);
+
+ // If the value is a NaN, canonicalize the value else, do nothing.
+ void VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al) {
+ VFPCanonicalizeNaN(value, value, cond);
+ }
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@@ -475,7 +507,50 @@
void Vmov(const DwVfpRegister dst,
const double imm,
- const Condition cond = al);
+ const Register scratch = no_reg);
+
+ void VmovHigh(Register dst, DwVfpRegister src);
+ void VmovHigh(DwVfpRegister dst, Register src);
+ void VmovLow(Register dst, DwVfpRegister src);
+ void VmovLow(DwVfpRegister dst, Register src);
+
+ // Loads the number from object into dst register.
+ // If |object| is neither smi nor heap number, |not_number| is jumped to
+ // with |object| still intact.
+ void LoadNumber(Register object,
+ LowDwVfpRegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating-point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DwVfpRegister double_dst,
+ Register heap_number_map,
+ Register scratch,
+ LowDwVfpRegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating-point values in the 32-bit integer range that are not exact
+ // integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch,
+ DwVfpRegister double_scratch0,
+ LowDwVfpRegister double_scratch1,
+ Label* not_int32);
+
+ // Generates function and stub prologue code.
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -484,7 +559,9 @@
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register argument_count,
+ bool restore_context);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -492,8 +569,8 @@
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -502,11 +579,6 @@
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out);
-
void LoadGlobalFunction(int index, Register function);
// Load the initial map from the global function. The registers
@@ -524,39 +596,31 @@
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
- void InvokeFunction(Handle<JSFunction> function,
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void IsObjectJSObjectType(Register heap_object,
Register map,
@@ -571,12 +635,14 @@
Register scratch,
Label* fail);
-#ifdef ENABLE_DEBUGGER_SUPPORT
+ void IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail);
+
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
-#endif
// ---------------------------------------------------------------------------
// Exception handling
@@ -625,7 +691,7 @@
// These instructions are generated to mark special location in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
- ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}
@@ -645,7 +711,7 @@
(FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
- ASSERT((type == -1) ||
+ DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
@@ -654,25 +720,26 @@
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
@@ -687,32 +754,25 @@
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
@@ -721,7 +781,9 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT,
+ MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
@@ -730,7 +792,10 @@
Label* gc_required);
// Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ void CopyFields(Register dst,
+ Register src,
+ LowDwVfpRegister double_scratch,
+ int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -768,11 +833,21 @@
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
+ // Type_reg can be no_reg. In that case ip is used.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
InstanceType type);
+ // Compare object type for heap object. Branch to false_label if type
+ // is lower than min_type or greater than max_type.
+ // Load map into the register map.
+ void CheckObjectTypeRange(Register heap_object,
+ Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label);
+
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -795,23 +870,20 @@
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail);
+ void CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
+ // the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail);
+ LowDwVfpRegister double_scratch,
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@@ -820,8 +892,13 @@
void CompareMap(Register obj,
Register scratch,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -831,8 +908,7 @@
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
void CheckMap(Register obj,
@@ -859,21 +935,20 @@
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
+ // Returns a condition that will be enabled if the object was a string
+ // and the passed-in condition passed. If the passed-in condition failed
+ // then flags remain unchanged.
Condition IsObjectStringType(Register obj,
- Register type) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- tst(type, Operand(kIsNotStringMask));
- ASSERT_EQ(0, kStringTag);
+ Register type,
+ Condition cond = al) {
+ ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
+ ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
+ tst(type, Operand(kIsNotStringMask), cond);
+ DCHECK_EQ(0, kStringTag);
return eq;
}
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -884,101 +959,100 @@
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
- // Uses VFP instructions to Convert a Smi to a double.
- void IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg);
+ // Load the value of a smi object into a double register.
+ // The register value must be between d0 and d15.
+ void SmiToDouble(LowDwVfpRegister value, Register smi);
- // Load the value of a number object into a VFP double register. If the object
- // is not a number a jump to the label not_number is performed and the VFP
- // double register is unchanged.
- void ObjectToDoubleVFPRegister(
- Register object,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+ // Check if a double can be exactly represented as a signed 32-bit integer.
+ // Z flag set to one if true.
+ void TestDoubleIsInt32(DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch);
- // Load the value of a smi object into a VFP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2);
+ // Try to convert a double to a signed 32-bit integer.
+ // Z flag set to one and result assigned if the conversion is exact.
+ void TryDoubleToInt32Exact(Register result,
+ DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch);
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If VFP3 is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32);
+ // Floors a double and writes the value to the result register.
+ // Goes to exact if the conversion is exact (to be able to test -0),
+ // falls through to the calling code if an overflow occurred, else goes to
+ // done. On return, input_high is loaded with the high bits of the input.
+ void TryInt32Floor(Register result,
+ DwVfpRegister double_input,
+ Register input_high,
+ LowDwVfpRegister double_scratch,
+ Label* done,
+ Label* exact);
- // Truncates a double using a specific rounding mode.
- // Clears the z flag (ne condition) if an overflow occurs.
- // If exact_conversion is true, the z flag is also cleared if the conversion
- // was inexact, i.e. if the double value could not be converted exactly
- // to a 32bit integer.
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
- CheckForInexactConversion check
- = kDontCheckForInexactConversion);
-
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the singed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if the result is saturated. On return
+ // 'result' either holds the answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister input,
+ Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- SwVfpRegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DwVfpRegister double_input);
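(Sketch of the ECMA-262 ToInt32 semantics these helpers implement: truncate toward zero, then wrap modulo 2^32. Host-side illustration, not V8 code:)

  #include <cmath>
  #include <cstdint>

  int32_t ToInt32(double d) {
    if (!std::isfinite(d)) return 0;         // NaN and +/-Inf map to 0
    double t = std::trunc(d);                // truncate toward zero
    double m = std::fmod(t, 4294967296.0);   // wrap modulo 2^32 (fmod is exact)
    if (m < 0) m += 4294967296.0;
    return static_cast<int32_t>(static_cast<uint32_t>(m));
  }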
- // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
- // instruction. On pre-ARM5 hardware this routine gives the wrong answer
- // for 0 (31 instead of 32). Source and scratch can be the same in which case
- // the source is clobbered. Source and zeros can also be the same in which
- // case scratch should be a different register.
- void CountLeadingZeros(Register zeros,
- Register source,
- Register scratch);
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and
+ // 'object' must be different registers. Exits with 'result' holding the
+ // answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must
+ // be different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_int32);
+
+ // Check whether d16-d31 are available on the CPU. The result is given by the
+ // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
+ void CheckFor32DRegs(Register scratch);
+
+ // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
+ // values to location, saving [d0..(d15|d31)].
+ void SaveFPRegs(Register location, Register scratch);
+
+ // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
+ // values from location, restoring [d0..(d15|d31)].
+ void RestoreFPRegs(Register location, Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = al);
+ void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1019,9 +1093,9 @@
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void MovToFloatParameter(DwVfpRegister src);
+ void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
+ void MovToFloatResult(DwVfpRegister src);
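(These helpers hide the difference between the hard-float EABI, where doubles travel in d0/d1, and the soft-float ABI, where they travel in core register pairs. A sketch of what MovToFloatParameter plausibly emits, assuming d0 is the first double parameter register:)

  void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
    DCHECK(src.is(d0));
    if (!use_eabi_hardfloat()) {
      vmov(r0, r1, src);  // soft-float: move the double into r0/r1
    }
    // hard-float: the value is already in d0, nothing to do.
  }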
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1037,13 +1111,18 @@
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void MovFromFloatParameter(DwVfpRegister dst);
+ void MovFromFloatResult(DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1062,11 +1141,15 @@
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
+ DCHECK(!code_object_.is_null());
return code_object_;
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
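(TruncatingDiv is the usual multiply-high trick for division by a constant. A hedged sketch for divisor 3, using the magic constant 0x55555556 from Hacker's Delight — illustrative, not the emitted code:)

  #include <cstdint>

  int32_t DivBy3(int32_t n) {
    int64_t prod = INT64_C(0x55555556) * n;        // magic multiply
    int32_t q = static_cast<int32_t>(prod >> 32);  // keep the high 32 bits
    return q - (n >> 31);                          // +1 correction if n < 0
  }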
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1083,28 +1166,27 @@
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, const char* msg);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void Assert(Condition cond, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cond, const char* msg);
+ void Check(Condition cond, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
+#ifdef __arm__
+ return base::OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
return true;
#else
return false;
@@ -1145,18 +1227,21 @@
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- mov(scratch, reg);
- SmiTag(scratch, SetCC);
+ void TrySmiTag(Register reg, Label* not_a_smi) {
+ TrySmiTag(reg, reg, not_a_smi);
+ }
+ void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
+ SmiTag(ip, src, SetCC);
b(vs, not_a_smi);
- mov(reg, scratch);
+ mov(reg, ip);
}
+
void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand(reg, ASR, kSmiTagSize), s);
+ mov(reg, Operand::SmiUntag(reg), s);
}
void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand(src, ASR, kSmiTagSize), s);
+ mov(dst, Operand::SmiUntag(src), s);
}
// Untag the source value into destination and jump if source is a smi.
@@ -1167,7 +1252,14 @@
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
- // Jump the register contains a smi.
+ // Test if the register contains a smi (Z == 0 (eq) if true).
+ inline void SmiTst(Register value) {
+ tst(value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value) {
+ tst(value, Operand(kSmiTagMask | kSmiSignMask));
+ }
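(The smi helpers above all rely on V8's pointer tagging: on 32-bit ARM a smi is the integer shifted left by kSmiTagSize (1) with a 0 tag bit, which is exactly what kSmiTagMask tests. An illustrative sketch with the constants assumed:)

  const int32_t kSmiTagSize = 1, kSmiTagMask = 1;

  int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
  int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }  // arithmetic shift
  bool IsSmi(int32_t word)      { return (word & kSmiTagMask) == 0; }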
+ // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
@@ -1182,17 +1274,23 @@
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if reg is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities
@@ -1205,64 +1303,136 @@
// ---------------------------------------------------------------------------
// String utilities
- // Checks if both objects are sequential ASCII strings and jumps to label
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
- // Checks if both objects are sequential ASCII strings and jumps to label
+ // Checks if both objects are sequential one-byte strings and jumps to label
// if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_ascii_strings);
+ void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_one_byte_strings);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential ASCII string and jump to label if
+ // Check if instance type is sequential one-byte string and jump to label if
// it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
// ---------------------------------------------------------------------------
// Patching helpers.
// Get the location of a relocated constant (its address in the constant pool)
// from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
+ void GetRelocatedValueLocation(Register ldr_location, Register result,
+ Register scratch);
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
+ DwVfpRegister input_reg,
+ LowDwVfpRegister double_scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register dst, Register src) {
+ Ubfx(dst, src, Field::kShift, Field::kSize);
+ }
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
+ static const int shift = Field::kShift;
+ static const int mask = Field::kMask >> shift << kSmiTagSize;
+ STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (shift < kSmiTagSize) {
+ mov(dst, Operand(src, LSL, kSmiTagSize - shift));
+ and_(dst, dst, Operand(mask));
+ } else if (shift > kSmiTagSize) {
+ mov(dst, Operand(src, LSR, shift - kSmiTagSize));
+ and_(dst, dst, Operand(mask));
+ } else {
+ and_(dst, src, Operand(mask));
+ }
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ DecodeFieldToSmi<Field>(reg, reg);
+ }
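(A worked example of the shift arithmetic in DecodeFieldToSmi, for a hypothetical field occupying bits 3..10; since kShift > kSmiTagSize the middle branch applies:)

  #include <cstdint>

  constexpr int kShift = 3, kSize = 8, kSmiTagSize = 1;      // hypothetical field
  constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;  // 0x7F8

  uint32_t DecodeFieldToSmi(uint32_t src) {
    // mask == kMask >> kShift << kSmiTagSize == 0x1FE, so the result is the
    // 8-bit field value left-shifted by one: the field value as a smi.
    return (src >> (kShift - kSmiTagSize)) & (kMask >> kShift << kSmiTagSize);
  }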
// Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type);
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, condition flags are set to eq.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ b(eq, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1278,8 +1448,7 @@
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InitializeNewString(Register string,
Register length,
@@ -1309,19 +1478,20 @@
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegister();
+
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1329,7 +1499,14 @@
// an assertion to fail.
class CodePatcher {
public:
- CodePatcher(byte* address, int instructions);
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1347,11 +1524,75 @@
private:
byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
-#endif // ENABLE_DEBUGGER_SUPPORT
+
+
+class FrameAndConstantPoolScope {
+ public:
+ FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
+ : masm_(masm),
+ type_(type),
+ old_has_frame_(masm->has_frame()),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ // We only want to enable constant pool access for non-manual frame scopes
+ // to ensure the constant pool pointer is valid throughout the scope.
+ DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm->set_has_frame(true);
+ masm->set_constant_pool_available(true);
+ masm->EnterFrame(type, !old_constant_pool_available_);
+ }
+
+ ~FrameAndConstantPoolScope() {
+ masm_->LeaveFrame(type_);
+ masm_->set_has_frame(old_has_frame_);
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+
+ // Normally we generate the leave-frame code when this object goes
+ // out of scope. Sometimes we may need to generate the code somewhere else
+ // in addition. Calling this will achieve that, but the object stays in
+ // scope, the MacroAssembler is still marked as being in a frame scope, and
+ // the code will be generated again when it goes out of scope.
+ void GenerateLeaveFrame() {
+ DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm_->LeaveFrame(type_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ StackFrame::Type type_;
+ bool old_has_frame_;
+ bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
+};
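(Typical usage is RAII-style, mirroring the existing FrameScope convention; a sketch, with StackFrame::INTERNAL chosen arbitrarily:)

  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    // ... emit code that needs a frame and constant pool access ...
  }  // the matching LeaveFrame is emitted when the scope ends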
+
+
+// Class for scoping the unavailability of constant pool access.
+class ConstantPoolUnavailableScope {
+ public:
+ explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
+ : masm_(masm),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(false);
+ }
+ }
+ ~ConstantPoolUnavailableScope() {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+ int old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
+};
// -----------------------------------------------------------------------------
@@ -1363,7 +1604,7 @@
inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 10ff2dd..f4918fe 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -1,41 +1,20 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
+#include "src/arm/regexp-macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -43,45 +22,49 @@
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
+ * - r4 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : points to tip of backtrack stack
+ * - r8 : Points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : points to tip of C stack.
+ * - r13/sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[52] Isolate* isolate (Address of the current isolate)
- * - fp[48] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[44] stack_area_base (High end of the memory area to use as
- * backtracking stack).
+ * - fp[56] Isolate* isolate (address of the current isolate)
+ * - fp[52] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[48] stack_area_base (high end of the memory area to use as
+ * backtracking stack).
+ * - fp[44] capture array size (may fit multiple sets of matches)
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
+ * - fp[32] return address (lr).
+ * - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
- * - fp[-4] end of input (Address of end of string).
- * - fp[-8] start of input (Address of first character in string).
+ * - fp[-4] end of input (address of end of string).
+ * - fp[-8] start of input (address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] Offset of location before start of input (effectively character
+ * - fp[-20] success counter (only for global regexps to count matches).
+ * - fp[-24] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
- * - fp[-24] At start (if 1, we are starting at the start of the
+ * - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-28] register 0 (Only positions must be stored in the first
+ * - fp[-32] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -115,8 +98,10 @@
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -125,9 +110,8 @@
success_label_(),
backtrack_label_(),
exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
+ DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
__ bind(&start_label_); // And then continue from here.
}
@@ -159,8 +143,8 @@
void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
+ DCHECK(reg >= 0);
+ DCHECK(reg < num_registers_);
if (by != 0) {
__ ldr(r0, register_location(reg));
__ add(r0, r0, Operand(by));
@@ -197,9 +181,9 @@
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, &not_at_start);
+ __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+ __ cmp(r0, Operand::Zero());
+ BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -212,9 +196,9 @@
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, on_not_at_start);
+ __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+ __ cmp(r0, Operand::Zero());
+ BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
@@ -229,54 +213,6 @@
}
-void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ add(r0, r0, Operand(byte_offset));
- }
-
- // r0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
- __ cmp(r1, Operand(str[i]));
- } else {
- __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- __ cmp(r1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ mov(r2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ add(r3, r2, Operand(match_char & 0xff));
- __ cmp(r1, r3);
- }
- }
- BranchOrBacktrack(ne, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
__ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
__ cmp(current_input_offset(), r0);
@@ -302,7 +238,7 @@
__ cmn(r1, Operand(current_input_offset()));
BranchOrBacktrack(gt, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_check;
@@ -331,8 +267,13 @@
__ b(ne, &fail);
__ sub(r3, r3, Operand('a'));
__ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
- __ b(hi, &fail);
-
+ __ b(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(r3, r3, Operand(224 - 'a'));
+ __ cmp(r3, Operand(254 - 224));
+ __ b(hi, &fail); // Weren't Latin-1 letters.
+ __ cmp(r3, Operand(247 - 224)); // Check for 247.
+ __ b(eq, &fail);
__ bind(&loop_check);
__ cmp(r0, r1);
@@ -346,7 +287,7 @@
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r2, end_of_input_address());
} else {
- ASSERT(mode_ == UC16);
+ DCHECK(mode_ == UC16);
int argument_count = 4;
__ PrepareCallCFunction(argument_count, r2);
@@ -369,17 +310,17 @@
// Address of current input position.
__ add(r1, current_input_offset(), Operand(end_of_input_address()));
// Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address()));
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
__ CallCFunction(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
// On success, increment position by length of capture.
__ add(current_input_offset(), current_input_offset(), Operand(r4));
@@ -413,11 +354,11 @@
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
} else {
- ASSERT(mode_ == UC16);
+ DCHECK(mode_ == UC16);
__ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
}
@@ -432,16 +373,6 @@
}
-void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ ldr(r0, register_location(reg1));
- __ ldr(r1, register_location(reg2));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
__ cmp(current_character(), Operand(c));
@@ -452,8 +383,12 @@
void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
+ if (c == 0) {
+ __ tst(current_character(), Operand(mask));
+ } else {
+ __ and_(r0, current_character(), Operand(mask));
+ __ cmp(r0, Operand(c));
+ }
BranchOrBacktrack(eq, on_equal);
}
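(The new c == 0 fast path works because tst updates the flags for current_character() & mask directly, saving both the scratch register and the extra cmp; the comparison against zero is implicit in the flag update. In plain C++ terms:)

  bool EqualAfterAnd(uint32_t ch, uint32_t mask, uint32_t c) {
    return (ch & mask) == c;  // for c == 0, testing (ch & mask) alone suffices
  }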
@@ -461,8 +396,12 @@
void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal) {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
+ if (c == 0) {
+ __ tst(current_character(), Operand(mask));
+ } else {
+ __ and_(r0, current_character(), Operand(mask));
+ __ cmp(r0, Operand(c));
+ }
BranchOrBacktrack(ne, on_not_equal);
}
@@ -472,7 +411,7 @@
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
+ DCHECK(minus < String::kMaxUtf16CodeUnit);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));
@@ -480,6 +419,44 @@
}
+void RegExpMacroAssemblerARM::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ sub(r0, current_character(), Operand(from));
+ __ cmp(r0, Operand(to - from));
+ BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition.
+}
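(Both range checks use the standard unsigned-compare trick: subtracting 'from' makes every character below the range wrap around to a huge unsigned value, so a single unsigned comparison covers both bounds. A host-side sketch:)

  bool InRange(uint32_t c, uint32_t from, uint32_t to) {
    return (c - from) <= (to - from);  // unsigned wraparound handles c < from
  }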
+
+
+void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ sub(r0, current_character(), Operand(from));
+ __ cmp(r0, Operand(to - from));
+ BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition.
+}
+
+
+void RegExpMacroAssemblerARM::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ mov(r0, Operand(table));
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+ __ and_(r1, current_character(), Operand(kTableSize - 1));
+ __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ add(r1,
+ current_character(),
+ Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+ }
+ __ ldrb(r0, MemOperand(r0, r1));
+ __ cmp(r0, Operand::Zero());
+ BranchOrBacktrack(ne, on_bit_set);
+}
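(A sketch of the lookup CheckBitInTable emits: the ByteArray acts as a one-byte-per-bucket membership table, where a non-zero byte means the bit is set; kTableSize is the table's bucket count, assumed to be a power of two:)

  #include <cstdint>

  bool BitInTable(const uint8_t* table, unsigned c, unsigned kTableSize) {
    return table[c & (kTableSize - 1)] != 0;
  }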
+
+
bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
@@ -487,35 +464,29 @@
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
// Check range 0x09..0x0d
__ sub(r0, current_character(), Operand('\t'));
__ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(hi, on_no_match);
+ __ b(ls, &success);
+ // \u00a0 (NBSP).
+ __ cmp(r0, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
}
return false;
case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), Operand(' '));
- BranchOrBacktrack(eq, on_no_match);
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- }
+ // The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9')
__ sub(r0, current_character(), Operand('0'));
- __ cmp(current_character(), Operand('9' - '0'));
+ __ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
@@ -547,7 +518,7 @@
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b));
__ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(hi, on_no_match);
} else {
Label done;
@@ -563,31 +534,31 @@
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Operand('z'));
BranchOrBacktrack(hi, on_no_match);
}
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
return true;
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Operand('z'));
__ b(hi, &done);
}
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_no_match);
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -609,6 +580,7 @@
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+ Label return_r0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -632,14 +604,15 @@
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
+ __ mov(r0, Operand::Zero());
+ __ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
@@ -652,13 +625,13 @@
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
+ __ jmp(&return_r0);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
// If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &exit_label_);
+ __ b(ne, &return_r0);
__ bind(&stack_ok);
@@ -679,41 +652,45 @@
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ cmp(r1, Operand(0));
- __ mov(r1, Operand(1), LeaveCC, eq);
- __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ str(r1, MemOperand(frame_pointer(), kAtStart));
+ // Initialize code pointer register
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(r1, Operand::Zero());
+ __ b(ne, &load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'), LeaveCC, eq);
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
-
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ add(r1, frame_pointer(), Operand(kRegisterZero));
+ __ mov(r2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &init_loop);
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ str(r0, register_location(i));
+ }
+ }
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
- // Load previous char as initial value of current character register.
- Label at_start;
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(ne, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ mov(current_character(), Operand('\n'));
- __ jmp(&start_label_);
+ __ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@@ -733,13 +710,17 @@
__ add(r1, r1, Operand(r2));
// r1 is length of string in characters.
- ASSERT_EQ(0, num_saved_registers_ % 2);
+ DCHECK_EQ(0, num_saved_registers_ % 2);
// Always an even number of capture registers. This allows us to
// unroll the loop once to add an operation between a load of a register
// and the following use of that register.
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in r4 for the zero-length check later.
+ __ mov(r4, r2);
+ }
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@@ -751,10 +732,58 @@
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
- __ mov(r0, Operand(SUCCESS));
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ add(r0, r0, Operand(1));
+ __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ sub(r1, r1, Operand(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmp(r1, Operand(num_saved_registers_));
+ __ b(lt, &return_r0);
+
+ __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
+ __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare r0 to initialize registers with its value in the next run.
+ __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // r4: capture start index
+ __ cmp(current_input_offset(), r4);
+ // Not a zero-length match, restart.
+ __ b(ne, &load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ cmp(current_input_offset(), Operand::Zero());
+ __ b(eq, &exit_label_);
+ // Advance current position after a zero-length match.
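+ // Otherwise a global regexp that matches the empty string would
+ // restart at the same position forever.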
+ __ add(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ b(&load_char_start_regexp);
+ } else {
+ __ mov(r0, Operand(SUCCESS));
+ }
}
+
// Exit and return r0
__ bind(&exit_label_);
+ if (global()) {
+ __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_r0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -773,10 +802,10 @@
SafeCallTarget(&check_preempt_label_);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
- __ b(ne, &exit_label_);
+ __ b(ne, &return_r0);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -794,13 +823,13 @@
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address()));
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
+ ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
// If it returns NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), r0);
@@ -813,15 +842,14 @@
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
+ __ jmp(&return_r0);
}
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -867,8 +895,8 @@
Label* on_end_of_input,
bool check_bounds,
int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
+ DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
+ DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
CheckPosition(cp_offset + characters - 1, on_end_of_input);
}
@@ -887,37 +915,8 @@
}
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
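+ // mov_label_offset materializes the label's offset within the code
+ // object, which makes the hand-managed backtrack constant pool (removed
+ // below) unnecessary.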
+ __ mov_label_offset(r0, label);
Push(r0);
CheckStackLimit();
}
@@ -962,14 +961,15 @@
void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(r0, Operand(to));
__ str(r0, register_location(register_index));
}
-void RegExpMacroAssemblerARM::Succeed() {
+bool RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
+ return global();
}
@@ -985,7 +985,7 @@
void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
+ DCHECK(reg_from <= reg_to);
__ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ str(r0, register_location(reg));
@@ -1003,16 +1003,34 @@
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ __ PrepareCallCFunction(3, scratch);
+
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
// Code* of self.
__ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ int stack_alignment = base::OS::ActivationFrameAlignment();
+ DCHECK(IsAligned(stack_alignment, kPointerSize));
+ __ sub(sp, sp, Operand(stack_alignment));
+
+ // r0 will point to the return address, placed by DirectCEntry.
+ __ mov(r0, sp);
+
ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ mov(ip, Operand(stack_guard_check));
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm_, ip);
+
+ // Drop the return address from the stack.
+ __ add(sp, sp, Operand(stack_alignment));
+
+ DCHECK(stack_alignment != 0);
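+ // Reload sp from the slot in which PrepareCallCFunction saved it when it
+ // realigned the frame.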
+ __ ldr(sp, MemOperand(sp, 0));
+
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -1027,8 +1045,8 @@
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1049,13 +1067,13 @@
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
+ DCHECK(re_code->instruction_start() <= *return_address);
+ DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
@@ -1080,8 +1098,8 @@
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1091,7 +1109,7 @@
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());
// The original start address of the characters to match.
@@ -1123,7 +1141,7 @@
MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
+ DCHECK(register_index < (1<<30));
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@@ -1176,14 +1194,14 @@
void RegExpMacroAssemblerARM::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
+ DCHECK(!source.is(backtrack_stackpointer()));
__ str(source,
MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
}
void RegExpMacroAssemblerARM::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
+ DCHECK(!target.is(backtrack_stackpointer()));
__ ldr(target,
MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
}
@@ -1192,7 +1210,7 @@
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
@@ -1202,7 +1220,7 @@
void RegExpMacroAssemblerARM::CheckStackLimit() {
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+ ExternalReference::address_of_regexp_stack_limit(isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(backtrack_stackpointer(), Operand(r0));
@@ -1210,50 +1228,8 @@
}
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
+bool RegExpMacroAssemblerARM::CanReadUnaligned() {
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
}
@@ -1261,49 +1237,39 @@
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r0;
+ // r4 is not being used to store the capture start index at this point.
+ __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = r4;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses if the CPU
// and the operating system running on the target allow it.
// If unaligned load/stores are not supported then this function must only
// be used to load a single character at a time.
-#if !V8_TARGET_CAN_READ_UNALIGNED
- ASSERT(characters == 1);
-#endif
+ if (!CanReadUnaligned()) {
+ DCHECK(characters == 1);
+ }
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else if (characters == 2) {
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
} else {
- ASSERT(characters == 1);
+ DCHECK(characters == 1);
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
}
} else {
- ASSERT(mode_ == UC16);
+ DCHECK(mode_ == UC16);
if (characters == 2) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else {
- ASSERT(characters == 1);
+ DCHECK(characters == 1);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
}
}
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 5c8ed06..7414e54 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -1,51 +1,22 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
+#include "src/arm/assembler-arm.h"
+#include "src/arm/assembler-arm-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM();
- virtual ~RegExpMacroAssemblerARM();
-};
-
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerARM();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@@ -59,10 +30,6 @@
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
@@ -70,7 +37,6 @@
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
@@ -79,6 +45,14 @@
uc16 minus,
uc16 mask,
Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
@@ -105,10 +79,11 @@
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
@@ -129,7 +104,8 @@
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -141,10 +117,10 @@
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne = kInputString - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
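+ // The old kAtStart slot is gone; the frame now keeps a count of
+ // successful captures instead (see the push in GetCode).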
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -161,9 +137,6 @@
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -213,18 +186,11 @@
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
+ Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (Latin1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
@@ -234,11 +200,6 @@
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
// Labels used internally.
Label entry_label_;
Label start_label_;
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 629c209..0444025 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,41 +1,20 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include <stdarg.h>
#include <stdlib.h>
-#include <math.h>
-#include <cstdarg>
-#include "v8.h"
+#include <cmath>
-#if defined(V8_TARGET_ARCH_ARM)
+#include "src/v8.h"
-#include "disasm.h"
-#include "assembler.h"
-#include "arm/constants-arm.h"
-#include "arm/simulator-arm.h"
+#if V8_TARGET_ARCH_ARM
+
+#include "src/arm/constants-arm.h"
+#include "src/arm/simulator-arm.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
#if defined(USE_SIMULATOR)
@@ -108,11 +87,11 @@
char** msg_address =
reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *msg_address;
- ASSERT(msg != NULL);
+ DCHECK(msg != NULL);
// Update this stop description.
- if (isWatchedStop(code) && !watched_stops[code].desc) {
- watched_stops[code].desc = msg;
+ if (isWatchedStop(code) && !watched_stops_[code].desc) {
+ watched_stops_[code].desc = msg;
}
if (strlen(msg) > 0) {
@@ -140,8 +119,8 @@
char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+ Instruction::kInstrSize);
// Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
+ if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
}
// Print the stop message and code if it is not the default code.
if (code != kMaxStopCode) {
@@ -276,7 +255,7 @@
// make them invisible to all commands.
UndoBreakpoints();
- while (!done) {
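+ // Stop stepping as soon as the simulated pc becomes invalid, e.g. once
+ // execution has ended.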
+ while (!done && !sim_->has_bad_pc()) {
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -330,9 +309,9 @@
PrintF("\n");
}
}
- for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
- uint64_t as_words = BitCast<uint64_t>(dvalue);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
VFPRegisters::Name(i, true),
dvalue,
@@ -343,10 +322,10 @@
if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (GetVFPSingleValue(arg1, &svalue)) {
- uint32_t as_word = BitCast<uint32_t>(svalue);
+ uint32_t as_word = bit_cast<uint32_t>(svalue);
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
- uint64_t as_words = BitCast<uint64_t>(dvalue);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n",
arg1,
dvalue,
@@ -363,17 +342,18 @@
|| (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
+ OFStream os(stdout);
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
+ os << arg1 << ": \n";
#ifdef DEBUG
- obj->PrintLn();
+ obj->Print(os);
+ os << "\n";
#else
- obj->ShortPrint();
- PrintF("\n");
+ os << Brief(obj) << "\n";
#endif
} else {
- PrintF("%s unrecognized\n", arg1);
+ os << arg1 << " unrecognized\n";
}
} else {
PrintF("printobject <value>\n");
@@ -398,7 +378,7 @@
int32_t words;
if (argc == next_arg) {
words = 10;
- } else if (argc == next_arg + 1) {
+ } else {
if (!GetValue(argv[next_arg], &words)) {
words = 10;
}
@@ -411,7 +391,7 @@
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
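+ // Test the smi tag first so the heap is only consulted for values that
+ // can actually be heap pointers.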
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -472,7 +452,7 @@
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
+ v8::base::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (argc == 2) {
@@ -628,8 +608,8 @@
static bool ICacheMatch(void* one, void* two) {
- ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
return one == two;
}
@@ -666,7 +646,7 @@
FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
- ASSERT_EQ(0, start & CachePage::kPageMask);
+ DCHECK_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
@@ -691,10 +671,10 @@
void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
intptr_t start,
int size) {
- ASSERT(size <= CachePage::kPageSize);
- ASSERT(AllOnOnePage(start, size - 1));
- ASSERT((start & CachePage::kLineMask) == 0);
- ASSERT((size & CachePage::kLineMask) == 0);
+ DCHECK(size <= CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK((start & CachePage::kLineMask) == 0);
+ DCHECK((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -715,9 +695,9 @@
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
+ CHECK_EQ(0,
+ memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), Instruction::kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -764,14 +744,15 @@
// All registers are initialized to zero to start with
// even though s_registers_ & d_registers_ share the same
// physical registers in the target.
- for (int i = 0; i < num_s_registers; i++) {
- vfp_register[i] = 0;
+ for (int i = 0; i < num_d_registers * 2; i++) {
+ vfp_registers_[i] = 0;
}
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
- FPSCR_rounding_mode_ = RZ;
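+ // Round-to-nearest is the reset value of the FPSCR rounding mode on
+ // hardware.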
+ FPSCR_rounding_mode_ = RN;
+ FPSCR_default_NaN_mode_ = false;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
@@ -793,6 +774,10 @@
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -828,7 +813,10 @@
Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
+ if (current->external_function_ == external_function) {
+ DCHECK_EQ(current->type(), type);
+ return current;
+ }
}
return new Redirection(external_function, type);
}
@@ -840,6 +828,12 @@
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
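+ // Map a register value that holds the address of a redirection's swi
+ // instruction back to the host function the redirection wraps.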
+ static void* ReverseRedirection(int32_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -859,7 +853,7 @@
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- ASSERT(isolate_data != NULL);
+ DCHECK(isolate_data != NULL);
Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
@@ -874,7 +868,7 @@
// Sets the register in the architecture state. It will also deal with updating
// Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int32_t value) {
- ASSERT((reg >= 0) && (reg < num_registers));
+ DCHECK((reg >= 0) && (reg < num_registers));
if (reg == pc) {
pc_modified_ = true;
}
@@ -885,7 +879,7 @@
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
- ASSERT((reg >= 0) && (reg < num_registers));
+ DCHECK((reg >= 0) && (reg < num_registers));
// Stupid code added to avoid bug in GCC.
// See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
if (reg >= num_registers) return 0;
@@ -895,25 +889,79 @@
double Simulator::get_double_from_register_pair(int reg) {
- ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+ DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
double dm_val = 0.0;
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
+ char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
+void Simulator::set_register_pair_from_double(int reg, double* value) {
+ DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+
void Simulator::set_dw_register(int dreg, const int* dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
+ DCHECK((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
registers_[dreg + 1] = dbl[1];
}
+void Simulator::get_d_register(int dreg, uint64_t* value) {
+ DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
+}
+
+
+void Simulator::set_d_register(int dreg, const uint64_t* value) {
+ DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
+}
+
+
+void Simulator::get_d_register(int dreg, uint32_t* value) {
+ DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_d_register(int dreg, const uint32_t* value) {
+ DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint64_t* value) {
+ DCHECK((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint64_t* value) {
+ DCHECK((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint32_t* value) {
+ DCHECK((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint32_t* value) {
+ DCHECK((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
@@ -934,138 +982,61 @@
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- vfp_register[sreg] = value;
+ DCHECK((sreg >= 0) && (sreg < num_s_registers));
+ vfp_registers_[sreg] = value;
}
unsigned int Simulator::get_s_register(int sreg) const {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- return vfp_register[sreg];
+ DCHECK((sreg >= 0) && (sreg < num_s_registers));
+ return vfp_registers_[sreg];
}
-void Simulator::set_s_register_from_float(int sreg, const float flt) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the single precision floating point value
- // into the unsigned integer element of vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &flt, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
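+// register_size counts 32-bit slots of vfp_registers_: 1 selects an
+// S register, 2 a D register.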
+template<class InputType, int register_size>
+void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
+ DCHECK(reg_index >= 0);
+ if (register_size == 1) DCHECK(reg_index < num_s_registers);
+ if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
+
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ memcpy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
}
-void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the integer value into the unsigned integer element of
- // vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &sint, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+template<class ReturnType, int register_size>
+ReturnType Simulator::GetFromVFPRegister(int reg_index) {
+ DCHECK(reg_index >= 0);
+ if (register_size == 1) DCHECK(reg_index < num_s_registers);
+ if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
+
+ ReturnType value = 0;
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
+ return value;
}
-void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
- // Read the bits from the double precision floating point value into the two
- // consecutive unsigned integer elements of vfp_register[] given by index
- // 2*sreg and 2*sreg+1.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-}
-
-
-float Simulator::get_float_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- float sm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
-
-
-int Simulator::get_sinteger_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- int sm_val = 0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
-
-
-double Simulator::get_double_from_d_register(int dreg) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
- return(dm_val);
-}
-
-
-// For use in calls that take two double values, constructed either
-// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
+// Runtime FP routines take:
+// - two double arguments, or
+// - one double argument and zero or one integer arguments.
+// All are constructed here from r0-r3 or d0, d1 and r0.
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
- *y = vfp_register[1];
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(1);
+ *z = get_register(0);
} else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
- if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
- *y = registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Register 2 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
+ *x = get_double_from_register_pair(0);
+ // Registers 2 and 3 -> y.
+ *y = get_double_from_register_pair(2);
+ // Register 2 -> z.
+ *z = get_register(2);
}
}
@@ -1073,10 +1044,10 @@
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
- char buffer[2 * sizeof(vfp_register[0])];
+ char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
- memcpy(vfp_register, buffer, sizeof(buffer));
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
@@ -1093,6 +1064,7 @@
registers_[12] = 0x50Bad4U;
}
+
// Some operating systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
@@ -1108,111 +1080,83 @@
int Simulator::ReadW(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
+ V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
+ V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
@@ -1241,37 +1185,26 @@
int32_t* Simulator::ReadDW(int32_t addr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
+ } else {
+ PrintF("Unaligned read at 0x%08x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- return;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
- return;
+ } else {
+ PrintF("Unaligned write at 0x%08x\n", addr);
+ UNIMPLEMENTED();
}
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
-#endif
}
@@ -1376,7 +1309,7 @@
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
- if (isnan(val1) || isnan(val2)) {
+ if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
@@ -1468,7 +1401,14 @@
}
case ROR: {
- UNIMPLEMENTED();
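+ // Rotate right: bits shifted out on the right re-enter on the left;
+ // the carry out is the top bit of the rotated result.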
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
break;
}
@@ -1490,7 +1430,7 @@
*carry_out = (result & 1) == 1;
result >>= 1;
} else {
- ASSERT(shift_amount >= 32);
+ DCHECK(shift_amount >= 32);
if (result < 0) {
*carry_out = true;
result = 0xffffffff;
@@ -1513,7 +1453,7 @@
*carry_out = (result & 1) == 1;
result = 0;
} else {
- ASSERT(shift_amount > 32);
+ DCHECK(shift_amount > 32);
*carry_out = false;
result = 0;
}
@@ -1540,7 +1480,14 @@
}
case ROR: {
- UNIMPLEMENTED();
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
break;
}
@@ -1577,11 +1524,11 @@
}
-void Simulator::ProcessPUW(Instruction* instr,
- int num_regs,
- int reg_size,
- intptr_t* start_address,
- intptr_t* end_address) {
+int32_t Simulator::ProcessPU(Instruction* instr,
+ int num_regs,
+ int reg_size,
+ intptr_t* start_address,
+ intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
switch (instr->PUField()) {
@@ -1612,11 +1559,10 @@
break;
}
}
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
+ return rn_val;
}
+
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
int rlist = instr->RlistValue();
@@ -1624,11 +1570,12 @@
intptr_t start_address = 0;
intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
+ int32_t rn_val =
+ ProcessPU(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
- ASSERT(start_address > 8191 || start_address < 0);
+ DCHECK(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
@@ -1642,7 +1589,10 @@
reg++;
rlist >>= 1;
}
- ASSERT(end_address == ((intptr_t)address) - 4);
+ DCHECK(end_address == ((intptr_t)address) - 4);
+ if (instr->HasW()) {
+ set_register(instr->RnValue(), rn_val);
+ }
}
@@ -1665,7 +1615,8 @@
intptr_t start_address = 0;
intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
+ int32_t rn_val =
+ ProcessPU(instr, num_regs, operand_size, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
for (int reg = vd; reg < vd + num_regs; reg++) {
@@ -1680,20 +1631,27 @@
address += 1;
} else {
if (load) {
- set_s_register_from_sinteger(
- 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
- set_s_register_from_sinteger(
- 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
+ int32_t data[] = {
+ ReadW(reinterpret_cast<int32_t>(address), instr),
+ ReadW(reinterpret_cast<int32_t>(address + 1), instr)
+ };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(reg, d);
} else {
- WriteW(reinterpret_cast<int32_t>(address),
- get_sinteger_from_s_register(2 * reg), instr);
- WriteW(reinterpret_cast<int32_t>(address + 1),
- get_sinteger_from_s_register(2 * reg + 1), instr);
+ int32_t data[2];
+ double d = get_double_from_d_register(reg);
+ memcpy(data, &d, 8);
+ WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
+ WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
address += 2;
}
}
- ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+ DCHECK(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+ if (instr->HasW()) {
+ set_register(instr->RnValue(), rn_val);
+ }
}
@@ -1709,18 +1667,22 @@
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call into API function native callback
// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
+ int32_t arg0, int32_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1746,59 +1708,33 @@
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- arg2 = vfp_register[2];
- arg3 = vfp_register[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@@ -1810,65 +1746,119 @@
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r0, static_cast<int32_t>(iresult));
+ set_register(r1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- set_register(r0, lo_res);
- set_register(r1, hi_res);
}
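In the soft-float convention modelled here, a 64-bit integer result is split across two core registers (r0/r1 above). A hedged sketch of that split, outside the simulator:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t iresult = INT64_C(0x1122334455667788);
      uint32_t lo = static_cast<uint32_t>(iresult);        // would go in r0
      uint32_t hi = static_cast<uint32_t>(iresult >> 32);  // would go in r1
      std::printf("lo=%08x hi=%08x\n", lo, hi);
      return 0;
    }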
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x",
- FUNCTION_ADDR(target), arg0);
+ reinterpret_cast<void*>(external), arg0);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, (int32_t) *result);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
- FUNCTION_ADDR(target), arg0, arg1);
+ reinterpret_cast<void*>(external), arg0, arg1);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
}
- set_register(r0, (int32_t) *result);
+ CHECK(stack_aligned);
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
// builtin call.
- ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
- "Call to host function at %p"
+ "Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
@@ -1926,6 +1916,12 @@
}
+double Simulator::canonicalizeNaN(double value) {
+ return (FPSCR_default_NaN_mode_ && std::isnan(value)) ?
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
+}
+
+
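canonicalizeNaN above models FPSCR default-NaN mode: when the mode bit is set, every NaN result collapses to one canonical quiet NaN. A minimal sketch of the semantics, with a stand-in canonical value (V8 itself uses its not-the-hole NaN constant):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Stand-in for FixedDoubleArray::canonical_not_the_hole_nan_as_double().
    double Canonicalize(bool default_nan_mode, double value) {
      const double kCanonicalNaN = std::numeric_limits<double>::quiet_NaN();
      return (default_nan_mode && std::isnan(value)) ? kCanonicalNaN : value;
    }

    int main() {
      double nan_result = std::nan("");
      std::printf("%f %f\n", Canonicalize(true, nan_result),
                  Canonicalize(false, 1.5));
      return 0;
    }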
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
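A stop is encoded as an SVC instruction whose 24-bit immediate is at least kStopCode, which is how the decoder separates stops from ordinary software interrupts. A hedged standalone sketch of that test (the kStopCode value is an assumption for illustration):

    #include <cstdint>
    #include <cstdio>

    const uint32_t kStopCode = 1u << 23;  // assumed value, for illustration

    bool IsStopInstruction(uint32_t instruction_bits) {
      uint32_t bits_27_24 = (instruction_bits >> 24) & 0xF;  // 0xF for SVC
      uint32_t svc_value = instruction_bits & 0x00FFFFFF;    // 24-bit immediate
      return (bits_27_24 == 0xF) && (svc_value >= kStopCode);
    }

    int main() {
      uint32_t stop = 0xEF000000u | kStopCode;  // unconditional svc, stop code
      std::printf("%d\n", IsStopInstruction(stop));
      return 0;
    }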
@@ -1933,62 +1929,62 @@
bool Simulator::isWatchedStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
+ DCHECK(code <= kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
+ DCHECK(code <= kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
- !(watched_stops[code].count & kStopDisabledBit);
+ !(watched_stops_[code].count & kStopDisabledBit);
}
void Simulator::EnableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
+ DCHECK(isWatchedStop(code));
if (!isEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
+ watched_stops_[code].count &= ~kStopDisabledBit;
}
}
void Simulator::DisableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
+ DCHECK(isWatchedStop(code));
if (isEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
+ watched_stops_[code].count |= kStopDisabledBit;
}
}
void Simulator::IncreaseStopCounter(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- ASSERT(isWatchedStop(code));
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+ DCHECK(code <= kMaxStopCode);
+ DCHECK(isWatchedStop(code));
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
+ watched_stops_[code].count = 0;
EnableStop(code);
} else {
- watched_stops[code].count++;
+ watched_stops_[code].count++;
}
}
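The watched_stops_ bookkeeping packs two things into one word: bit 31 is the disabled flag and the low 31 bits count hits, which is why the overflow check above masks with ~(1 << 31). A small sketch of the layout:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kStopDisabledBit = 1u << 31;

    int main() {
      uint32_t count = 5u | kStopDisabledBit;           // disabled, hit 5 times
      bool disabled = (count & kStopDisabledBit) != 0;
      uint32_t hits = count & ~kStopDisabledBit;
      std::printf("disabled=%d hits=%u\n", disabled, hits);
      return 0;
    }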
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
+ DCHECK(code <= kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
// Don't print the state of unused breakpoints.
if (count != 0) {
- if (watched_stops[code].desc) {
+ if (watched_stops_[code].desc) {
PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
+ code, code, state, count, watched_stops_[code].desc);
} else {
PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
code, code, state, count);
@@ -2028,11 +2024,23 @@
SetNZFlags(alu_out);
}
} else {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ int rd = instr->RdValue();
+ int32_t acc_value = get_register(rd);
+ if (instr->Bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value + mul_out;
+ set_register(rn, result);
+ } else {
+ // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value - mul_out;
+ set_register(rn, result);
+ }
}
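The new branch implements both accumulate forms, with the swapped field naming the comment describes: MLA computes Rd = Rn + Rm * Rs, and MLS (bit 22 set) computes Rd = Rn - Rm * Rs. A minimal sketch of the arithmetic:

    #include <cstdint>
    #include <cstdio>

    int32_t Mla(int32_t acc, int32_t rm, int32_t rs) { return acc + rm * rs; }
    int32_t Mls(int32_t acc, int32_t rm, int32_t rs) { return acc - rm * rs; }

    int main() {
      std::printf("%d %d\n", Mla(10, 3, 4), Mls(10, 3, 4));  // 22 -2
      return 0;
    }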
} else {
// The signed/long multiply instructions use the terms RdHi and RdLo
@@ -2084,7 +2092,7 @@
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= rm_val;
set_register(rn, rn_val);
@@ -2092,7 +2100,7 @@
}
case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
addr = rn_val;
rn_val += rm_val;
set_register(rn, rn_val);
@@ -2127,7 +2135,7 @@
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= imm_val;
set_register(rn, rn_val);
@@ -2135,7 +2143,7 @@
}
case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
addr = rn_val;
rn_val += imm_val;
set_register(rn, rn_val);
@@ -2167,7 +2175,7 @@
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
- ASSERT((rd % 2) == 0);
+ DCHECK((rd % 2) == 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
@@ -2198,8 +2206,8 @@
}
} else {
// signed byte loads
- ASSERT(instr->HasSign());
- ASSERT(instr->HasL());
+ DCHECK(instr->HasSign());
+ DCHECK(instr->HasL());
int8_t val = ReadB(addr);
set_register(rd, val);
}
@@ -2252,6 +2260,8 @@
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
+ } else if ((type == 1) && instr->IsNopType1()) {
+ // NOP.
} else {
int rd = instr->RdValue();
int rn = instr->RnValue();
@@ -2261,7 +2271,7 @@
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
- ASSERT(instr->TypeValue() == 1);
+ DCHECK(instr->TypeValue() == 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@@ -2408,7 +2418,7 @@
// Format(instr, "cmn'cond 'rn, 'imm");
alu_out = rn_val + shifter_operand;
SetNZFlags(alu_out);
- SetCFlag(!CarryFrom(rn_val, shifter_operand));
+ SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
} else {
// Other instructions matching this pattern are handled in the
@@ -2484,7 +2494,7 @@
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= im_val;
set_register(rn, rn_val);
@@ -2492,7 +2502,7 @@
}
case ia_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
addr = rn_val;
rn_val += im_val;
set_register(rn, rn_val);
@@ -2548,46 +2558,183 @@
int32_t addr = 0;
switch (instr->PUField()) {
case da_x: {
- ASSERT(!instr->HasW());
+ DCHECK(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
break;
}
case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
-
- if (instr->Bit(22) == 0x1) { // USAT.
- int32_t sat_pos = instr->Bits(20, 16);
- int32_t sat_val = (1 << sat_pos) - 1;
- int32_t shift = instr->Bits(11, 7);
- int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmValue());
- if (shift_type == 0) { // LSL
- rm_val <<= shift;
- } else { // ASR
- rm_val >>= shift;
+ if (instr->Bit(4) == 0) {
+ // Memop.
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ // Pkhbt.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t shift = instr->Bits(11, 7);
+ rm_val <<= shift;
+ set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
+ } else {
+ // Pkhtb.
+ uint32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t shift = instr->Bits(11, 7);
+ if (shift == 0) {
+ shift = 32;
+ }
+ rm_val >>= shift;
+ set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 1:
+ UNIMPLEMENTED();
+ break;
+ case 2:
+ UNIMPLEMENTED();
+ break;
+ case 3: {
+ // Usat.
+ int32_t sat_pos = instr->Bits(20, 16);
+ int32_t sat_val = (1 << sat_pos) - 1;
+ int32_t shift = instr->Bits(11, 7);
+ int32_t shift_type = instr->Bit(6);
+ int32_t rm_val = get_register(instr->RmValue());
+ if (shift_type == 0) { // LSL
+ rm_val <<= shift;
+ } else { // ASR
+ rm_val >>= shift;
+ }
+ // If saturation occurs, the Q flag should be set in the CPSR.
+ // There is no Q flag yet, and no instruction (MRS) to read the
+ // CPSR directly.
+ if (rm_val > sat_val) {
+ rm_val = sat_val;
+ } else if (rm_val < 0) {
+ rm_val = 0;
+ }
+ set_register(rd, rm_val);
+ break;
+ }
}
- // If saturation occurs, the Q flag should be set in the CPSR.
- // There is no Q flag yet, and no instruction (MRS) to read the
- // CPSR directly.
- if (rm_val > sat_val) {
- rm_val = sat_val;
- } else if (rm_val < 0) {
- rm_val = 0;
+ } else {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ UNIMPLEMENTED();
+ break;
+ case 1:
+ UNIMPLEMENTED();
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb16.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd,
+ (rm_val & 0xFF) | (rm_val & 0xFF0000));
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 3:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, (rm_val & 0xFF));
+ } else {
+ // Uxtab.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + (rm_val & 0xFF));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
}
- set_register(rd, rm_val);
- } else { // SSAT.
- UNIMPLEMENTED();
}
return;
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- UNIMPLEMENTED();
}
break;
}
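The Uxtb, Uxtab and Uxtb16 cases above all share one pattern: rotate Rm right by 0, 8, 16 or 24 bits, then mask out byte lanes (adding Rn for the accumulate form). A hedged sketch of that pattern:

    #include <cstdint>
    #include <cstdio>

    uint32_t RotateRight(uint32_t value, int rotate_bits) {
      if (rotate_bits == 0) return value;  // avoid shifting by 32
      return (value >> rotate_bits) | (value << (32 - rotate_bits));
    }

    int main() {
      uint32_t rm = 0xAABBCCDDu;
      uint32_t uxtb = RotateRight(rm, 8) & 0xFF;          // 000000cc
      uint32_t uxtb16 = RotateRight(rm, 8) & 0x00FF00FF;  // 00aa00cc
      std::printf("%08x %08x\n", uxtb, uxtb16);
      return 0;
    }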
case db_x: {
+ if (FLAG_enable_sudiv) {
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
+ // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
+ int rm = instr->RmValue();
+ int32_t rm_val = get_register(rm);
+ int rs = instr->RsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t ret_val = 0;
+ DCHECK(rs_val != 0);
+ // udiv
+ if (instr->Bit(21) == 0x1) {
+ ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
+ static_cast<uint32_t>(rs_val));
+ } else if ((rm_val == kMinInt) && (rs_val == -1)) {
+ ret_val = kMinInt;
+ } else {
+ ret_val = rm_val / rs_val;
+ }
+ set_register(rn, ret_val);
+ return;
+ }
+ }
+ }
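Two edge cases matter in the sdiv path above: kMinInt / -1 is pinned to kMinInt (matching the ARM instruction, and avoiding signed-overflow undefined behaviour in the C++ division), and a zero divisor is rejected by DCHECK rather than modelled. A minimal sketch:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int32_t SimulatedSdiv(int32_t rm, int32_t rs) {
      const int32_t kMinInt32 = std::numeric_limits<int32_t>::min();
      if (rm == kMinInt32 && rs == -1) {
        return kMinInt32;  // rm / rs here would overflow and be UB in C++
      }
      return rm / rs;      // caller must guarantee rs != 0
    }

    int main() {
      std::printf("%d\n", SimulatedSdiv(std::numeric_limits<int32_t>::min(), -1));
      return 0;
    }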
// Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
addr = rn_val - shifter_operand;
if (instr->HasW()) {
@@ -2627,7 +2774,7 @@
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
- uint32_t mask = (1 << bitcount) - 1;
+ uint32_t mask = 0xffffffffu >> (32 - bitcount);
rd_val &= ~(mask << lsbit);
if (instr->RmValue() != 15) {
// bfi - bitfield insert.
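The mask rewrite above is a correctness fix, not a style change: for a 32-bit field, (1 << bitcount) - 1 shifts by 32 and is undefined behaviour in C++, while 0xffffffffu >> (32 - bitcount) is well defined for bitcount in 1..32. A small illustration:

    #include <cstdint>
    #include <cstdio>

    uint32_t FieldMask(uint32_t bitcount) {
      // Well defined for bitcount in [1, 32]; (1u << 32) would be UB.
      return 0xffffffffu >> (32 - bitcount);
    }

    int main() {
      std::printf("%08x %08x\n", FieldMask(8), FieldMask(32));  // 000000ff ffffffff
      return 0;
    }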
@@ -2674,7 +2821,7 @@
void Simulator::DecodeType4(Instruction* instr) {
- ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
+ DCHECK(instr->Bit(22) == 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
HandleRList(instr, true);
@@ -2717,6 +2864,7 @@
// vmov :Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
@@ -2727,8 +2875,8 @@
// vmrs
// Dd = vsqrt(Dm)
void Simulator::DecodeTypeVFP(Instruction* instr) {
- ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- ASSERT(instr->Bits(11, 9) == 0x5);
+ DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+ DCHECK(instr->Bits(11, 9) == 0x5);
// Obtain double precision register codes.
int vm = instr->VFPMRegValue(kDoublePrecision);
@@ -2752,17 +2900,26 @@
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
double dm_value = get_double_from_d_register(vm);
- double dd_value = fabs(dm_value);
+ double dd_value = std::fabs(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
+ int fixed_value = get_sinteger_from_s_register(vd * 2);
+ double divide = 1 << fraction_bits;
+ set_d_register_from_double(vd, fixed_value / divide);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
@@ -2772,7 +2929,8 @@
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
- double dd_value = sqrt(dm_value);
+ double dd_value = std::sqrt(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
@@ -2794,12 +2952,14 @@
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
// vadd
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
@@ -2811,7 +2971,32 @@
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Opc1Value() == 0x0)) {
+ // vmla, vmls
+ const bool is_vmls = (instr->Opc3Value() & 0x1);
+
+ if (instr->SzValue() != 0x1) {
+ UNREACHABLE(); // Not used by V8.
+ }
+
+ const double dd_val = get_double_from_d_register(vd);
+ const double dn_val = get_double_from_d_register(vn);
+ const double dm_val = get_double_from_d_register(vm);
+
+ // Note: we do the mul and add/sub in separate steps so the product is
+ // rounded to double first; a fused multiply-add would carry extra
+ // intermediate precision into the accumulate.
+ set_d_register_from_double(vd, dn_val * dm_val);
+ if (is_vmls) {
+ set_d_register_from_double(
+ vd,
+ canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
+ } else {
+ set_d_register_from_double(
+ vd,
+ canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
+ }
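The two-step write-back above is deliberate: the product is rounded to double before the accumulate, matching a non-fused vmla/vmls, whereas computing dn * dm + dd in one expression could be contracted into a fused multiply-add with extra intermediate precision. A hedged sketch of the distinction:

    #include <cstdio>

    double VmlaNonFused(double acc, double n, double m) {
      double product = n * m;  // product rounded to double here
      return acc + product;    // rounded again on the accumulate
    }

    int main() {
      // With std::fma the product would not be rounded before the add,
      // which can yield a different final bit pattern.
      std::printf("%.17g\n", VmlaNonFused(0.1, 0.2, 0.3));
      return 0;
    }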
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
@@ -2822,6 +3007,7 @@
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2830,6 +3016,26 @@
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (ARM core register to scalar)
+ int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dd_value = get_double_from_d_register(vd);
+ int32_t data[2];
+ memcpy(data, &dd_value, 8);
+ data[instr->Bit(21)] = get_register(instr->RtValue());
+ memcpy(&dd_value, data, 8);
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (scalar to ARM core register)
+ int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dn_value = get_double_from_d_register(vn);
+ int32_t data[2];
+ memcpy(data, &dn_value, 8);
+ set_register(instr->RtValue(), data[instr->Bit(21)]);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
@@ -2844,6 +3050,7 @@
(z_flag_FPSCR_ << 30) |
(c_flag_FPSCR_ << 29) |
(v_flag_FPSCR_ << 28) |
+ (FPSCR_default_NaN_mode_ << 25) |
(inexact_vfp_flag_ << 4) |
(underflow_vfp_flag_ << 3) |
(overflow_vfp_flag_ << 2) |
@@ -2866,6 +3073,7 @@
z_flag_FPSCR_ = (rt_value >> 30) & 1;
c_flag_FPSCR_ = (rt_value >> 29) & 1;
v_flag_FPSCR_ = (rt_value >> 28) & 1;
+ FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1;
inexact_vfp_flag_ = (rt_value >> 4) & 1;
underflow_vfp_flag_ = (rt_value >> 3) & 1;
overflow_vfp_flag_ = (rt_value >> 2) & 1;
@@ -2883,7 +3091,7 @@
void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ DCHECK((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0));
int t = instr->RtValue();
@@ -2901,8 +3109,8 @@
void Simulator::DecodeVCMP(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ DCHECK(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1));
// Comparison.
@@ -2926,7 +3134,7 @@
// Raise exceptions for quiet NaNs if necessary.
if (instr->Bit(7) == 1) {
- if (isnan(dd_value)) {
+ if (std::isnan(dd_value)) {
inv_op_vfp_flag_ = true;
}
}
@@ -2939,8 +3147,8 @@
void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+ DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ DCHECK((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
VFPRegPrecision dst_precision = kDoublePrecision;
VFPRegPrecision src_precision = kSinglePrecision;
@@ -2964,7 +3172,7 @@
bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
- ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
+ DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
double max_uint = static_cast<double>(0xffffffffu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);
@@ -3017,9 +3225,9 @@
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
+ DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
(instr->Bits(27, 23) == 0x1D));
- ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ DCHECK(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
@@ -3043,7 +3251,7 @@
// mode or the default Round to Zero mode.
VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
: RZ;
- ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
+ DCHECK((mode == RM) || (mode == RZ) || (mode == RN));
bool unsigned_integer = (instr->Bit(16) == 0);
bool double_precision = (src_precision == kDoublePrecision);
@@ -3057,8 +3265,8 @@
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
+ unsigned_integer ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
inexact_vfp_flag_ = (abs_diff != 0);
@@ -3103,15 +3311,15 @@
if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
- set_d_register_from_double(dst,
- static_cast<double>((uint32_t)val));
+ set_d_register_from_double(
+ dst, static_cast<double>(static_cast<uint32_t>(val)));
} else {
set_d_register_from_double(dst, static_cast<double>(val));
}
} else {
if (unsigned_integer) {
- set_s_register_from_float(dst,
- static_cast<float>((uint32_t)val));
+ set_s_register_from_float(
+ dst, static_cast<float>(static_cast<uint32_t>(val)));
} else {
set_s_register_from_float(dst, static_cast<float>(val));
}
@@ -3127,7 +3335,7 @@
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
- ASSERT((instr->TypeValue() == 6));
+ DCHECK((instr->TypeValue() == 6));
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@@ -3168,31 +3376,32 @@
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
int rt = instr->RtValue();
int rn = instr->RnValue();
- int vm = instr->VmValue();
+ int vm = instr->VFPMRegValue(kDoublePrecision);
if (instr->HasL()) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ int32_t data[2];
+ double d = get_double_from_d_register(vm);
+ memcpy(data, &d, 8);
+ set_register(rt, data[0]);
+ set_register(rn, data[1]);
} else {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
+ int32_t data[] = { get_register(rt), get_register(rn) };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(vm, d);
}
}
break;
case 0x8:
- case 0xC: { // Load and store double to memory.
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store double to memory.
int rn = instr->RnValue();
- int vd = instr->VdValue();
+ int vd = instr->VFPDRegValue(kDoublePrecision);
int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
@@ -3200,18 +3409,29 @@
int32_t address = get_register(rn) + 4 * offset;
if (instr->HasL()) {
// Load double from memory: vldr.
- set_s_register_from_sinteger(2*vd, ReadW(address, instr));
- set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ int32_t data[] = {
+ ReadW(address, instr),
+ ReadW(address + 4, instr)
+ };
+ double val;
+ memcpy(&val, data, 8);
+ set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(2*vd), instr);
- WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ int32_t data[2];
+ double val = get_double_from_d_register(vd);
+ memcpy(data, &val, 8);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
}
break;
}
case 0x4:
case 0x5:
+ case 0x6:
+ case 0x7:
case 0x9:
+ case 0xB:
// Load/store multiple double from memory: vldm/vstm.
HandleVList(instr);
break;
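The vldr/vstr and register-pair hunks above all switched from per-word s-register accessors to memcpy through a two-word buffer, moving the raw 64-bit pattern without ever interpreting it as a float; this keeps NaN payloads intact across a store/load round trip. A minimal sketch of the technique:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 1.5;
      int32_t words[2];
      std::memcpy(words, &d, 8);              // vstr: double -> two words
      double round_tripped;
      std::memcpy(&round_tripped, words, 8);  // vldr: two words -> double
      std::printf("%f\n", round_tripped);
      return 0;
    }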
@@ -3224,6 +3444,158 @@
}
+void Simulator::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ int8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ int16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ uint8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ uint16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ get_d_register(Vd + r, data);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ data[0] = ReadW(address, instr);
+ data[1] = ReadW(address + 4, instr);
+ set_d_register(Vd + r, data);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ // pld: ignore instruction.
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+}
+
+
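The case-5 and case-7 branches of DecodeSpecialCondition implement NEON vmovl by widening each lane of a D register into a Q register, signed or unsigned. A minimal sketch of the signed 8-to-16-bit case:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int8_t from[8] = { -1, 2, -3, 4, -5, 6, -7, 8 };  // one D register
      int16_t to[8];                                    // one Q register
      for (int e = 0; e < 8; e++) {
        to[e] = from[e];  // implicit sign extension per lane
      }
      std::printf("%d %d\n", to[0], to[1]);  // -1 2
      return 0;
    }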
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3240,7 +3612,7 @@
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
+ DecodeSpecialCondition(instr);
} else if (ConditionallyExecute(instr)) {
switch (instr->TypeValue()) {
case 0:
@@ -3321,33 +3693,7 @@
}
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -3401,6 +3747,37 @@
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments
+
+ // First four arguments passed in registers.
+ DCHECK(argument_count >= 4);
+ set_register(r0, va_arg(parameters, int32_t));
+ set_register(r1, va_arg(parameters, int32_t));
+ set_register(r2, va_arg(parameters, int32_t));
+ set_register(r3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
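The stack setup above relies on a power-of-two trick: when ActivationFrameAlignment() is a power of two, entry_stack &= -alignment clears the low bits and rounds the stack pointer down to an aligned address. For example:

    #include <cstdio>

    int main() {
      int alignment = 8;              // ActivationFrameAlignment, power of two
      int sp = 1003;
      int aligned = sp & -alignment;  // 1003 rounds down to 1000
      std::printf("%d\n", aligned);
      return 0;
    }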
@@ -3411,6 +3788,35 @@
}
+void Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (use_eabi_hardfloat()) {
+ set_d_register_from_double(0, d0);
+ set_d_register_from_double(1, d1);
+ } else {
+ set_register_pair_from_double(0, &d0);
+ set_register_pair_from_double(2, &d1);
+ }
+ CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r0);
+ return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ if (use_eabi_hardfloat()) {
+ return get_double_from_d_register(0);
+ } else {
+ return get_double_from_register_pair(0);
+ }
+}
+
+
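CallFP marshals its two doubles according to the active float ABI: into d0/d1 for hard-float, or into core register pairs for soft-float. A hedged sketch of the soft-float packing (the regs array stands in for r0..r3):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      int32_t regs[4];                // stands in for r0..r3
      double d0 = 3.25, d1 = -7.5;
      std::memcpy(&regs[0], &d0, 8);  // d0 -> r0, r1
      std::memcpy(&regs[2], &d1, 8);  // d1 -> r2, r3
      std::printf("%08x %08x %08x %08x\n",
                  static_cast<uint32_t>(regs[0]), static_cast<uint32_t>(regs[1]),
                  static_cast<uint32_t>(regs[2]), static_cast<uint32_t>(regs[3]));
      return 0;
    }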
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 585f1e0..76865bc 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -1,29 +1,6 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Declares a Simulator for ARM instructions if we are not generating a native
@@ -36,7 +13,7 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
-#include "allocation.h"
+#include "src/allocation.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@@ -49,19 +26,16 @@
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int, Isolate*);
+ void*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
@@ -86,9 +60,9 @@
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
-#include "constants-arm.h"
-#include "hashmap.h"
-#include "assembler.h"
+#include "src/arm/constants-arm.h"
+#include "src/assembler.h"
+#include "src/hashmap.h"
namespace v8 {
namespace internal {
@@ -142,7 +116,12 @@
num_s_registers = 32,
d0 = 0, d1, d2, d3, d4, d5, d6, d7,
d8, d9, d10, d11, d12, d13, d14, d15,
- num_d_registers = 16
+ d16, d17, d18, d19, d20, d21, d22, d23,
+ d24, d25, d26, d27, d28, d29, d30, d31,
+ num_d_registers = 32,
+ q0 = 0, q1, q2, q3, q4, q5, q6, q7,
+ q8, q9, q10, q11, q12, q13, q14, q15,
+ num_q_registers = 16
};
explicit Simulator(Isolate* isolate);
@@ -158,22 +137,54 @@
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
+ void get_d_register(int dreg, uint64_t* value);
+ void set_d_register(int dreg, const uint64_t* value);
+ void get_d_register(int dreg, uint32_t* value);
+ void set_d_register(int dreg, const uint32_t* value);
+ void get_q_register(int qreg, uint64_t* value);
+ void set_q_register(int qreg, const uint64_t* value);
+ void get_q_register(int qreg, uint32_t* value);
+ void set_q_register(int qreg, const uint32_t* value);
+
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
- void set_d_register_from_double(int dreg, const double& dbl);
- double get_double_from_d_register(int dreg);
- void set_s_register_from_float(int sreg, const float dbl);
- float get_float_from_s_register(int sreg);
- void set_s_register_from_sinteger(int reg, const int value);
- int get_sinteger_from_s_register(int reg);
+
+ void set_d_register_from_double(int dreg, const double& dbl) {
+ SetVFPRegister<double, 2>(dreg, dbl);
+ }
+
+ double get_double_from_d_register(int dreg) {
+ return GetFromVFPRegister<double, 2>(dreg);
+ }
+
+ void set_s_register_from_float(int sreg, const float flt) {
+ SetVFPRegister<float, 1>(sreg, flt);
+ }
+
+ float get_float_from_s_register(int sreg) {
+ return GetFromVFPRegister<float, 1>(sreg);
+ }
+
+ void set_s_register_from_sinteger(int sreg, const int sint) {
+ SetVFPRegister<int, 1>(sreg, sint);
+ }
+
+ int get_sinteger_from_s_register(int sreg) {
+ return GetFromVFPRegister<int, 1>(sreg);
+ }
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
@@ -187,6 +198,10 @@
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -232,7 +247,7 @@
// Checks if the current instruction should be executed based on its
// condition bits.
- bool ConditionallyExecute(Instruction* instr);
+ inline bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
@@ -247,20 +262,21 @@
inline int GetCarry() {
return c_flag_ ? 1 : 0;
- };
+ }
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
+ inline double canonicalizeNaN(double value);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
- void ProcessPUW(Instruction* instr,
- int num_regs,
- int operand_size,
- intptr_t* start_address,
- intptr_t* end_address);
+ int32_t ProcessPU(Instruction* instr,
+ int num_regs,
+ int operand_size,
+ intptr_t* start_address,
+ intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
@@ -305,6 +321,7 @@
// Support for VFP.
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
+ void DecodeSpecialCondition(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
@@ -325,13 +342,19 @@
void* external_function,
v8::internal::ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
+ template<class ReturnType, int register_size>
+ ReturnType GetFromVFPRegister(int reg_index);
+
+ template<class InputType, int register_size>
+ void SetVFPRegister(int reg_index, const InputType& value);
+
+ void CallInternal(byte* entry);
+
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
@@ -343,7 +366,7 @@
bool v_flag_;
// VFP architecture state.
- unsigned int vfp_register[num_s_registers];
+ unsigned int vfp_registers_[num_d_registers * 2];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;
@@ -351,6 +374,7 @@
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
VFPRoundingMode FPSCR_rounding_mode_;
+ bool FPSCR_default_NaN_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
@@ -384,14 +408,14 @@
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
- StopCountAndDesc watched_stops[kNumOfWatchedStops];
+ StopCountAndDesc watched_stops_[kNumOfWatchedStops];
};
@@ -401,13 +425,13 @@
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+#define CALL_GENERATED_FP_INT(entry, p0, p1) \
+ Simulator::current(Isolate::Current())->CallFPReturnsInt( \
+ FUNCTION_ADDR(entry), p0, p1)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
// The simulator has its own stack. Thus it has a different stack limit from
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
deleted file mode 100644
index d514b60..0000000
--- a/src/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,4466 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- ASSERT(value_off_addr > key_off_addr);
- ASSERT((value_off_addr - key_off_addr) % 4 == 0);
- ASSERT((value_off_addr - key_off_addr) < (256 * 4));
- ASSERT(map_off_addr > key_off_addr);
- ASSERT((map_off_addr - key_off_addr) % 4 == 0);
- ASSERT((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ ldr(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Check the map matches.
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- // It's a nice optimization if this constant is encodable in the bic insn.
-
- uint32_t mask = Code::kFlagsNotUsedInLookup;
- ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
- __ bic(flags_reg, flags_reg, Operand(mask));
- // Using cmn and the negative instead of cmp means we can use movw.
- if (flags < 0) {
- __ cmn(flags_reg, Operand(-flags));
- } else {
- __ cmp(flags_reg, Operand(flags));
- }
- __ b(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- ASSERT(sizeof(Entry) == 12);
-
- // Make sure the flags does not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
- ASSERT(!extra2.is(receiver));
- ASSERT(!extra2.is(name));
- ASSERT(!extra2.is(scratch));
- ASSERT(!extra2.is(extra));
-
- // Check scratch, extra and extra2 registers are valid.
- ASSERT(!scratch.is(no_reg));
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
- ASSERT(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
- // Mask down the eor argument to the minimum to keep the immediate
- // ARM-encodable.
- __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ and_(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
- __ and_(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ ldr(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, isolate->global());
- __ cmp(prototype, ip);
- __ b(ne, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly; otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ ldr(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ ldr(dst, FieldMemOperand(dst, offset));
- }
-}
-
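The offset arithmetic above amounts to the following standalone C++ sketch; the pointer size and header size are assumed values for a 32-bit target, shown only to make the two cases explicit.

// Negative adjusted indices address in-object slots; non-negative ones
// address the external properties array.
int FieldOffset(int adjusted_index, int instance_size) {
  const int kPointerSize = 4;           // assumed 32-bit target
  const int kFixedArrayHeaderSize = 8;  // stands in for FixedArray::kHeaderSize
  return adjusted_index < 0
             ? instance_size + adjusted_index * kPointerSize      // in-object
             : adjusted_index * kPointerSize + kFixedArrayHeaderSize;
}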
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, miss_label);
-
- // Load length directly from the JS array.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
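The mask-and-compare pair above is the usual instance-type test; a hedged C++ rendering, with assumed constant values:

// String types have the kIsNotStringMask bits clear, so one mask and one
// compare decide string-ness.
bool IsStringInstanceType(uint32_t instance_type) {
  const uint32_t kIsNotStringMask = 0x80;  // assumed encoding
  const uint32_t kStringTag = 0x0;
  return (instance_type & kIsNotStringMask) == kStringTag;
}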
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object,
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string, leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(r0, scratch1);
- __ Ret();
-}
-
-
-// Generate StoreField code; the value is passed in the r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- // r0 : value
- Label exit;
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
- }
-
- // Stubs are never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ push(receiver_reg);
- __ mov(r2, Operand(transition));
- __ Push(r2, r0);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ mov(ip, Operand(transition));
- __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(r0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, r0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, r0);
- __ RecordWriteField(scratch,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
-
- // Return the value (register r0).
- __ bind(&exit);
- __ Ret();
-}
-
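The JumpIfSmi fast exits above rely on V8's pointer tagging; sketched in C++, the write-barrier decision is just a tag test (tag values assumed for a 32-bit scheme):

// Smis carry a 0 low bit and are never heap pointers, so stores of smis
// need no remembered-set update.
bool StoreNeedsWriteBarrier(uintptr_t value_word) {
  const uintptr_t kSmiTagMask = 1;         // assumed 32-bit tagging scheme
  return (value_word & kSmiTagMask) != 0;  // tagged heap object
}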
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- r1: function to call
- // -----------------------------------
-
- // Check that the function really is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ mov(r0, Operand(5));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-static const int kFastApiCallArguments = 3;
-
-// Reserves space for the extra arguments to FastHandleApiCall in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- __ mov(scratch, Operand(Smi::FromInt(0)));
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
-
-
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : last JS argument
- // -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(r5, function);
- __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
-
- // Pass the additional arguments FastHandleApiCall expects.
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(r0, api_call_info);
- __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
- } else {
- __ Move(r6, call_data);
- }
- // Store JS function and call data.
- __ stm(ib, sp, r5.bit() | r6.bit());
-
- // r2 points to call data as expected by Arguments
- // (refer to layout above).
- __ add(r2, sp, Operand(2 * kPointerSize));
-
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // r0 = v8::Arguments&
- // The v8::Arguments block is located just after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args = data
- __ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values = last argument
- __ add(ip, r2, Operand(argc * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ mov(ip, Operand(0));
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- AllowExternalCallThatCantCauseGC scope(masm);
-
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
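The four words stored into the exit frame above mirror the layout below; the struct and field names follow the code comments and are illustrative, not the real v8::Arguments definition.

// Sketch of the argument block GenerateFastApiDirectCall assembles.
struct FastApiArgumentsLayout {
  void** implicit_args;   // points at the call data word (r2 above)
  void** values;          // address of the last JS argument
  int length;             // argc
  int is_construct_call;  // always 0 on this path
};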
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- ®ular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has the side effect of fetching a 'holder' for the
- // API (the object which is instanceof for the signature). It's safe to
- // omit it here: if present, it was already fetched by the previous
- // CheckPrototypes call.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for the fast API call case: clean up the preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ b(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(®ular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 5);
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
- // If the interceptor returned the no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- __ b(ne, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
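Conceptually the emitted checks reduce to the walk below, written over hypothetical handle types (prototype(), IsGlobalObject(), property_cell() are illustrative, not real APIs):

// Returns true when every global object between object and holder still
// has an empty (hole-valued) cell for the property.
bool GlobalCellsStillEmpty(const JSObject* object, const JSObject* holder,
                           const Object* the_hole, const char* name) {
  for (const JSObject* cur = object; cur != holder; cur = cur->prototype()) {
    if (cur->IsGlobalObject() && cur->property_cell(name)->value() != the_hole)
      return false;  // someone defined the property; take the miss path
  }
  return true;
}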
-
-// Convert the int passed in register ival to an IEEE 754 single precision
-// floating point value and store it at memory location (dst + 4 * wordoffset).
-// If VFP3 is available, use it for the conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move the sign bit from the source to the destination. This works because
- // the sign bit of an IEEE 754 binary32 value has the same position and
- // polarity as the 2's complement sign bit of a 32-bit integer.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, ¬_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(¬_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // Or in the mantissa (top 23 bits of ival).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
-
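The VFP-less branch above is a software int-to-binary32 conversion. Here is a portable C++ sketch of the same bit manipulation, truncating low mantissa bits exactly as the generated code does; the GCC/Clang __builtin_clz stands in for the CountLeadingZeros step.

#include <stdint.h>

// Mirrors the non-VFP path: sign, special-case |x| <= 1, then CLZ-based
// exponent and mantissa assembly.
uint32_t IntToBinary32Bits(int32_t ival) {
  uint32_t fval = (uint32_t)ival & 0x80000000u;  // move the sign bit over
  uint32_t mag = (uint32_t)ival;
  if (fval != 0) mag = 0u - mag;                 // absolute value, like rsb
  if (mag <= 1) {
    // 0 stays all-zero; +/-1 just needs the biased zero exponent or'ed in.
    return mag == 0 ? fval : (fval | (127u << 23));
  }
  int zeros = __builtin_clz(mag);                // like CountLeadingZeros
  fval |= (uint32_t)((31 - zeros) + 127) << 23;  // biased exponent
  mag <<= (zeros + 1);                           // shift up, chop the top bit
  fval |= mag >> (32 - 23);                      // top 23 bits -> mantissa
  return fval;
}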
-
-// Convert an unsigned integer with the specified number of leading zeroes
-// in its binary representation to an IEEE 754 double.
-// The integer to convert is passed in register hiword.
-// The resulting double is returned in registers hiword:loword.
-// This function does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If the least significant bit of the biased exponent was not 1, it was
- // corrupted by the most significant bit of the mantissa, so fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
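A C++ rendering of the same arithmetic for a caller who already knows the leading-zero count; like the generated code, it is not valid for 0. Constants follow the IEEE 754 binary64 layout.

#include <stdint.h>

// Produces the hi:lo words of the double equal to `value`.
void UIntToDoubleWords(uint32_t value, int leading_zeroes,
                       uint32_t* hi, uint32_t* lo) {
  const int kExponentBias = 1023;
  const int kExponentShift = 20;  // exponent lives in the hi word
  const int kMantissaBitsInTopWord = 20;
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const uint32_t biased = (uint32_t)(kExponentBias + meaningful_bits);
  const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;
  uint32_t hiword = biased << kExponentShift;
  if (hi_shift > 0) {
    *lo = value << (32 - hi_shift);
    hiword |= value >> hi_shift;   // implicit bit lands on the exponent LSB
  } else {
    *lo = 0;
    hiword |= value << (-hi_shift);
  }
  // The implicit leading mantissa bit overlapped the exponent's LSB; if
  // that LSB should have been 0, clear the corruption.
  if ((biased & 1) == 0) hiword &= ~(1u << kExponentShift);
  *hi = hiword;
}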
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- Handle<Map> current_map(current->map());
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (heap()->InNewSpace(*prototype)) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ mov(reg, Operand(prototype));
- }
- }
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
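At a high level the compiled checks encode the loop below. The types and the expected_map accessor are hypothetical, shown only to make the control flow explicit; the real code bakes the expected maps into the stub.

// True iff every map from object up to and including holder is unchanged,
// which is what lets the stub cache its lookup result.
bool PrototypeChainUnchanged(const JSObject* object, const JSObject* holder) {
  const JSObject* cur = object;
  while (cur != holder) {
    if (cur->map() != cur->expected_map()) return false;  // map check
    cur = cur->prototype();                               // advance
  }
  return holder->map() == holder->expected_map();         // holder map check
}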
-
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
-
- // Return the constant value.
- __ LoadHeapObject(r0, value);
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- // Build the AccessorInfo::args_ list on the stack and push the property name
- // below the exit frame so the GC is aware of them and can update the pointers.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3, callback);
- __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
- } else {
- __ Move(scratch3, Handle<Object>(callback->data()));
- }
- __ Push(reg, scratch3, name_reg);
- __ mov(r0, sp); // r0 = Handle<String>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
-
- const int kStackUnwindSpace = 4;
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // So far the most popular follow-ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only those; other cases may be added later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code;
- // the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
- // Invoke an interceptor. Note: the map checks from the receiver to the
- // interceptor's holder have been compiled before (see the caller of
- // this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
- // Check if the interceptor provided a value for the property. If so,
- // return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver);
- }
- // Leave the internal frame.
- }
- // Check that the maps from the interceptor's holder to the lookup's holder
- // haven't changed, and load the lookup's holder into the holder_reg register.
- if (must_perform_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), r0, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ Move(scratch2, callback);
- // holder_reg is either receiver or scratch1.
- if (!receiver.is(holder_reg)) {
- ASSERT(scratch1.is(holder_reg));
- __ Push(receiver, holder_reg);
- __ ldr(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ Push(scratch3, scratch2, name_reg);
- } else {
- __ push(receiver);
- __ ldr(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ Push(holder_reg, scratch3, scratch2, name_reg);
- }
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- masm()->isolate());
- __ TailCallExternalReference(ref, 5, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(name));
- __ b(ne, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, miss);
- CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space, so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r4, r3);
- } else {
- __ cmp(r1, Operand(function));
- }
- __ b(ne, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- Register receiver = r1;
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
- name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
-
- Register elements = r6;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- Label with_write_barrier;
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(r4, &with_write_barrier);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&with_write_barrier);
-
- __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, ¬_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only elements, convert to fast object; otherwise bail out.
- __ bind(¬_fast_object);
- __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
- // r1: receiver
- // r3: map
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- r3,
- r7,
- &call_builtin);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(r3, r3, &call_builtin);
- }
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // r0: array's length + 1.
- // r4: elements' length.
-
- if (!FLAG_inline_new) {
- __ b(&call_builtin);
- }
-
- __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- Isolate* isolate = masm()->isolate();
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
- __ cmp(end_elements, r3);
- __ b(ne, &call_builtin);
-
- __ mov(r9, Operand(new_space_allocation_limit));
- __ ldr(r9, MemOperand(r9));
- __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r3, r9);
- __ b(hi, &call_builtin);
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
- // Push the argument.
- __ str(r2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
- __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
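The attempt_to_grow_elements path is an inline bump-pointer extension of new space; its gating conditions look like this in plain C++ (pointer names are illustrative):

// Growth succeeds only when the elements backing store ends exactly at the
// allocation top and the extra kAllocationDelta words still fit the limit.
bool TryGrowElementsInPlace(char** allocation_top, char* allocation_limit,
                            char* elements_end, size_t delta_bytes) {
  if (elements_end != *allocation_top) return false;  // not the last object
  if (*allocation_top + delta_bytes > allocation_limit) return false;
  *allocation_top += delta_bytes;                     // claim the memory
  return true;                                        // caller fills the holes
}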
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- Register receiver = r1;
- Register elements = r3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- r4, r0, name, &miss);
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r4 and calculate new length.
- __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
- __ b(lt, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(r0, r6);
- __ b(eq, &call_builtin);
-
- // Set the array's length.
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
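Semantically, the fast path above is the small routine below, written over a hypothetical tagged-value model. The hole sentinel forces the builtin so that elements inherited from the prototype chain are still honored.

// Returns false when the generic builtin must handle the pop instead.
bool TryFastArrayPop(void** elements, int* length, void* the_hole,
                     void** result) {
  if (*length == 0) return false;      // the stub returns undefined here
  void* last = elements[*length - 1];
  if (last == the_hole) return false;  // hole: defer to Builtins::c_ArrayPop
  elements[--*length] = the_hole;      // shrink and refill with the hole
  *result = last;
  return true;
}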
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r1;
- Register index = r4;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r0;
- Register index = r4;
- Register scratch = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = r1;
- __ ldr(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, r0);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
-}
-
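The and_ with Smi::FromInt(0xffff) above clamps the char code without untagging; a sketch assuming a one-bit smi tag with tag value 0:

// Masking the tagged value with the tagged mask keeps the low 16 payload
// bits and leaves the (zero) smi tag bit clear.
uint32_t ClampSmiCharCode(uint32_t tagged_code) {
  const int kSmiTagSize = 1;                      // assumed 31-bit smis
  return tagged_code & (0xffffu << kSmiTagSize);  // == Smi::FromInt(0xffff)
}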
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(VFP3)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope scope_vfp3(VFP3);
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq);
- __ Ret(eq);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
-
- // Since VFP3 is enabled, we use FPU rounding in RM (round towards
- // minus infinity) mode.
-
- // Load the HeapNumber value.
- // We will need access to the value in the core registers, so we load it
- // with ldrd and move it to the fpu. It also spares a sub instruction for
- // updating the HeapNumber value address, as vldr expects a multiple
- // of 4 offset.
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(d1, r4, r5);
-
- // Back up the FPSCR.
- __ vmrs(r3);
- // Set a custom FPSCR:
- // - Set rounding mode to "Round towards Minus Infinity"
- // (i.e. bits [23:22] = 0b10).
- // - Clear the VFP cumulative exception flags (bits [3:0]).
- // - Make sure the Flush-to-zero mode control bit is unset (bit 24).
- __ bic(r9, r3,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kRoundToMinusInf));
- __ vmsr(r9);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
-
- // Use vcvt latency to start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ bic(r6, r5, Operand(HeapNumber::kSignMask));
- __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
-
- // Retrieve FPSCR and check for vfp exceptions.
- __ vmrs(r9);
- __ tst(r9, Operand(kVFPExceptionMask));
- __ b(&no_vfp_exception, eq);
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant under a Math.floor call, so just
- // return the original argument.
- __ sub(r7, r6, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(&restore_fpscr_and_return, eq);
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- __ cmp(r7, Operand(HeapNumber::kMantissaBits));
- // If greater or equal, the argument is already rounded and in r0.
- __ b(&restore_fpscr_and_return, ge);
- __ b(&wont_fit_smi);
-
- __ bind(&no_vfp_exception);
- // Move the result back to general purpose register r0.
- __ vmov(r0, s0);
- // Check if the result fits into a smi.
- __ add(r1, r0, Operand(0x40000000), SetCC);
- __ b(&wont_fit_smi, mi);
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-
- // Check for -0.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(&restore_fpscr_and_return, ne);
- // r5 already holds the HeapNumber exponent.
- __ tst(r5, Operand(HeapNumber::kSignMask));
- // If the HeapNumber is negative, the result was -0: reload the original
- // HeapNumber into r0 and return it. Otherwise r0 already holds smi 0, so
- // we can just return.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
-
- __ bind(&restore_fpscr_and_return);
- // Restore FPSCR and return.
- __ vmsr(r3);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&wont_fit_smi);
- // Restore FPSCR and fall through to the slow case.
- __ vmsr(r3);
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
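-// A hedged sketch (not from the original file): the same floor-to-int32
-// trick in portable C++, assuming <cfenv>, <cmath> and <cstdint> are
-// available and honored by the compiler. It mirrors the
-// vmrs/vmsr/vcvt_s32_f64 sequence above: back up the rounding state, force
-// round-toward-minus-infinity, convert, check the invalid-operation flag,
-// and restore. The helper name is hypothetical.
-static inline int32_t FloorToInt32(double x, bool* ok) {
- const int saved = std::fegetround(); // backup, like vmrs(r3)
- std::fesetround(FE_DOWNWARD); // like setting the kRoundToMinusInf bits
- std::feclearexcept(FE_ALL_EXCEPT); // clear cumulative exception flags
- int32_t result = static_cast<int32_t>(std::lrint(x)); // like vcvt_s32_f64
- *ok = (std::fetestexcept(FE_INVALID) == 0); // the kVFPExceptionMask test
- std::fesetround(saved); // restore, like vmsr(r3)
- return result;
-}
-
-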
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(r0, ¬_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ b(mi, &slow);
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(¬_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ tst(r1, Operand(HeapNumber::kSignMask));
- __ b(ne, &negative_sign);
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ eor(r1, r1, Operand(HeapNumber::kSignMask));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
- __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
-}
-
-
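-// A hedged sketch (not from the original file): the smi fast path above is
-// the classic branchless absolute value. With an arithmetic shift,
-// mask = x >> 31 is 0 for non-negative x and -1 for negative x, so
-// (x ^ mask) - mask negates exactly the negative inputs.
-static inline int32_t BranchlessAbs(int32_t x) {
- int32_t mask = x >> 31; // like Operand(r0, ASR, kBitsPerInt - 1)
- return (x ^ mask) - mask; // eor r1, r0, mask; sub r0, r1, mask
-}
-// BranchlessAbs(INT32_MIN) still overflows, which is why the stub above
-// takes the slow path when the result remains negative (b(mi, &slow)).
-
-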
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if the object is a global object, as we don't want to
- // repatch it to the global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, r0, r3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
-
- ReserveSpaceForFastApiCall(masm(), r0);
-
- // Check that the maps haven't changed and find the holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(r1, &miss);
- }
-
- // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, r0, r3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
- break;
-
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
- break;
-
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
- break;
- }
-
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
- &miss);
-
- // Move returned value, the function to call, to r1.
- __ mov(r1, r0);
- // Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- __ push(r1); // receiver
- __ mov(ip, Operand(callback)); // callback info
- __ Push(ip, r2, r0);
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ Push(r1, r2, r0); // Receiver, name, value.
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
- __ push(r0); // strict mode
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted, and reintroducing the global would
- // require updating the property details in the global object's
- // property dictionary. We bail out to the runtime system to do that.
- __ mov(r4, Operand(cell));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ cmp(r5, r6);
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that receiver is not a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
- }
-
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(FIELD, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(INTERCEPTOR, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ JumpIfSmi(r0, &miss);
- CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
-
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss);
- }
-
- __ mov(r0, r4);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadArrayLength(masm(), r1, r2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
-
- // Check the name hasn't changed.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(r1, &miss);
-
- int receiver_count = receiver_maps->length();
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(r2, ip);
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
- }
-
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
-
- // Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
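-// A hedged sketch (not from the original file): the polymorphic stub above
-// is a linear scan over the known receiver maps with a tail jump to the
-// matching handler. A hypothetical standalone rendering with raw pointers:
-static inline const Code* SelectHandler(const Map* map,
- const Map* const* maps,
- const Code* const* handlers,
- int count, const Code* miss) {
- for (int i = 0; i < count; ++i) {
- if (map == maps[i]) return handlers[i]; // cmp r2, ip; Jump(..., eq)
- }
- return miss; // fall through to KeyedLoadIC_Miss
-}
-
-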
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
-
- // Check that the name has not changed.
- __ cmp(r1, Operand(name));
- __ b(ne, &miss);
-
- // r3 is used as a scratch register. r1 and r2 keep their values if a jump
- // to the miss label is generated.
- GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
- __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(r2, &miss);
-
- int receiver_count = receiver_maps->length();
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(r3, ip);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- __ mov(r3, Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
-
- // Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- lr : return address
- // -- [sp] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code.
- // If there are, jump to the generic constructor stub, which calls the
- // actual code for the function, thereby hitting the break points.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(r2, r7);
- __ b(ne, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- // r7: undefined
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &generic_stub_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ Check(ne, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r7: undefined
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Fill all the in-object properties with undefined.
- // r0: argc
- // r1: first argument
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- // r7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ cmp(r0, Operand(arg_number));
- __ b(le, ¬_passed);
- // Argument passed - find it on the stack.
- __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- __ b(&next);
- __ bind(¬_passed);
- // Set the property to undefined.
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ mov(r2, Operand(constant));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- }
- }
-
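- // A hedged sketch (not from the original file): the loop above in plain
- // C++, with hypothetical names. Each recorded "this.x = ..." assignment
- // stores the matching passed argument, undefined when that argument was
- // not passed, or the recorded constant:
- //   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- //     fields[i] = shared->IsThisPropertyAssignmentArgument(i)
- //         ? (ArgIndex(i) < argc ? args[ArgIndex(i)] : undefined)
- //         : shared->GetThisPropertyAssignmentConstant(i);
- //   }
- // where ArgIndex(i) abbreviates shared->GetThisPropertyAssignmentArgument(i).
-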
- // Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- }
-
- // r0: argc
- // r4: JSObject (not tagged)
- // Move argc to r1 and the JSObject to return to r0 and tag it.
- __ mov(r1, r0);
- __ mov(r0, r4);
- __ orr(r0, r0, Operand(kHeapObjectTag));
-
- // r0: JSObject
- // r1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
- __ Jump(lr);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, miss_force_generic;
-
- Register key = r0;
- Register receiver = r1;
-
- __ JumpIfNotSmi(key, &miss_force_generic);
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
- __ Ret();
-
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
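-// A hedged sketch (not from the original file): on 32-bit V8 a smi is the
-// integer shifted left by kSmiTagSize (1) with a zero tag bit (see the
-// STATIC_ASSERT below), so tagging and untagging are single shifts:
-static inline int32_t SmiTagValue(int32_t value) { return value << 1; } // LSL
-static inline int32_t SmiUntagValue(int32_t smi) { return smi >> 1; } // ASR
-
-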
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r3: elements array
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We do not untag the smi key; instead we work with it
- // as if it were premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ add(r2, r3, Operand(key, LSL, 2));
- __ vldr(d0, r2, 0);
- } else {
- __ add(r4, r3, Operand(key, LSL, 2));
- // r4: pointer to the beginning of the double we want to load.
- __ ldr(r2, MemOperand(r4, 0));
- __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For float array type:
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
- // For double array type:
- // d0: value (if VFP3 is supported)
- // r2/r3: value (if VFP3 is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int array type (the unsigned case is handled below), we need to
- // see whether the value can be represented in a smi. If not, we need to
- // convert it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- Register dst1 = r1;
- Register dst2 = r3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- d0,
- dst1,
- dst2,
- r9,
- s0);
- __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
-
- __ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
- // VFP is not available; do a manual single-to-double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
-
- __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
- __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ mov(r0, r4);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
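-// A hedged sketch (not from the original file): the non-VFP float path above
-// widens binary32 to binary64 with integer ops only. Assuming IEEE-754 and
-// <cstdint>, the same rebias-and-shift looks like this (subnormals keep a
-// zero exponent, exactly as crudely as the assembly does):
-static inline uint64_t Binary32ToBinary64Bits(uint32_t f) {
- uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
- uint32_t exp32 = (f >> 23) & 0xFF;
- uint64_t frac = f & 0x7FFFFFu;
- uint64_t exp64;
- if (exp32 == 0) exp64 = 0; // zero/subnormal: skip the rebias
- else if (exp32 == 0xFF) exp64 = 0x7FF; // Inf/NaN: keep the max exponent
- else exp64 = exp32 - 127u + 1023u; // rebias, as the add above does
- return sign | (exp64 << 52) | (frac << (52 - 23)); // shift mantissa up
-}
-
-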
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(r5, value);
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r5: value (integer).
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ add(r3, r3, Operand(key, LSL, 2));
- // r3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
- FloatingPointHelper::ConvertIntToDouble(
- masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst1, dst2.
- r4, s2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- __ vstr(d0, r3, 0);
- } else {
- __ str(r6, MemOperand(r3, 0));
- __ str(r7, MemOperand(r3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
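- // A hedged note (not from the original file): Usat(r5, 8, r5) above
- // saturates to [0..255] in one instruction; a portable equivalent is
- //   v < 0 ? 0 : (v > 255 ? 255 : static_cast<uint8_t>(v)).
-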
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // r3: external array.
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires the offset to be a multiple of 4, so we cannot
- // fold -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires the offset to be a multiple of 4, so we
- // cannot fold -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- } else {
- // VFP3 is not available; do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, key, LSL, 1));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ add(r7, r3, Operand(key, LSL, 2));
- // r7: effective address of destination element.
- __ str(r6, MemOperand(r7, 0));
- __ str(r5, MemOperand(r7, Register::kSizeInBytes));
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If the exponent is negative, the result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
- // If the exponent is too big, the result is the minimal value.
- __ cmp(r9, Operand(meaningful_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningful_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- __ bind(&done);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case; value, key, and receiver are still in their entry registers.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
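-
- // The non-VFP3 float path above packs a binary32 by hand: special
- // exponents (zeros, subnormals, NaNs, infinities) are flushed first,
- // the exponent is rebiased from 1023 to 127, and the top 23 mantissa
- // bits are kept (truncated, not rounded). Below is a minimal host-side
- // sketch of the same packing, assuming IEEE-754 doubles; it is
- // illustrative only and mirrors the kBinary32* constants used above.
- #include <cstdint>
- #include <cstring>
-
- uint32_t DoubleBitsToBinary32(double value) {
-   uint64_t bits;
-   std::memcpy(&bits, &value, sizeof bits);  // Reinterpret the double bits.
-   uint32_t sign = static_cast<uint32_t>(bits >> 63) << 31;
-   int exponent = static_cast<int>((bits >> 52) & 0x7FF);
-   uint32_t mantissa23 = static_cast<uint32_t>((bits >> 29) & 0x7FFFFF);
-   if (exponent == 0x7FF) return sign | 0x7F800000u | mantissa23;  // NaN/Inf.
-   if (exponent == 0) return sign | mantissa23;     // Zero/subnormal input.
-   int rebiased = exponent - 1023 + 127;            // Rebias the exponent.
-   if (rebiased > 0xFE) return sign | 0x7F800000u;  // Overflow: infinity.
-   if (rebiased < 1) return sign;                   // Underflow: signed zero.
-   return sign | (static_cast<uint32_t>(rebiased) << 23) | mantissa23;
- }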
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r0, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss_force_generic);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
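-
- // The load above turns the smi-tagged key directly into a byte offset:
- // a smi stores value << kSmiTagSize, so shifting it left by another
- // kPointerSizeLog2 - kSmiTagSize gives value * kPointerSize with no
- // separate untagging step. A tiny sketch of that arithmetic, assuming
- // the 32-bit layout used here (kSmiTagSize == 1, kPointerSizeLog2 == 2):
- #include <cassert>
- #include <cstdint>
-
- int32_t SmiKeyToByteOffset(int32_t smi_key) {
-   // smi_key == key << 1, so one more left shift yields key * 4.
-   return smi_key << (2 /* kPointerSizeLog2 */ - 1 /* kSmiTagSize */);
- }
-
- int main() {
-   for (int32_t key = 0; key < 8; ++key) {
-     assert(SmiKeyToByteOffset(key << 1) == key * 4);
-   }
- }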
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = r0;
- Register receiver_reg = r1;
- Register elements_reg = r2;
- Register heap_number_reg = r2;
- Register indexed_double_offset = r3;
- Register scratch = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register heap_number_map = r7;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key_reg, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(key_reg, Operand(scratch));
- __ b(hs, &miss_force_generic);
-
- // Load the upper word of the double in the fixed array and test it
- // against the hole NaN.
- __ add(indexed_double_offset, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- __ b(&miss_force_generic, eq);
-
- // Not the hole. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
-
- // No need to reload the upper 32 bits of the double; they are already
- // in scratch.
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ ldr(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(r0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
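-
- // The stub above detects "the hole" by comparing only the upper word of
- // the element against kHoleNanUpper32: the hole is a reserved NaN bit
- // pattern that ordinary arithmetic never produces, so a single 32-bit
- // compare suffices. A sketch of the same test; the constant below is a
- // placeholder assumption, the real value is V8's kHoleNanUpper32.
- #include <cstdint>
- #include <cstring>
-
- const uint32_t kAssumedHoleUpper32 = 0x7FFFFFFF;  // Placeholder value.
-
- bool IsTheHole(double element) {
-   uint64_t bits;
-   std::memcpy(&bits, &element, sizeof bits);
-   return static_cast<uint32_t>(bits >> 32) == kAssumedHoleUpper32;
- }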
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch (elements)
- // -- r4 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r4;
- Register elements_reg = r3;
- Register length_reg = r5;
- Register scratch2 = r6;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key_reg, &miss_force_generic);
-
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- // Make sure the elements array is a fast element array and not a
- // copy-on-write (COW) array.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(elements_kind == FAST_ELEMENTS);
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is growing by only a single element; anything
- // else must be handled by the runtime. The flags were already set by
- // the previous compare.
- __ b(ne, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Check for COW elements; in general they are not handled by this stub.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(length_reg, scratch);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
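-
- // Note the asymmetry in the finish_store path above: the
- // FAST_SMI_ONLY_ELEMENTS store skips RecordWrite because smis are never
- // heap pointers, while the FAST_ELEMENTS store always records the slot
- // so the incremental/generational GC can find the new reference. A
- // dynamically-checked sketch of that invariant; RememberSlot is a
- // hypothetical stand-in for the write barrier.
- #include <cstdint>
-
- inline bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }  // Tag bit.
-
- void StoreTagged(intptr_t* slot, intptr_t tagged,
-                  void (*RememberSlot)(intptr_t*)) {
-   *slot = tagged;
-   if (!IsSmi(tagged)) RememberSlot(slot);  // Barrier only for heap pointers.
- }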
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch
- // -- r5 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register scratch4 = r7;
- Register length_reg = r7;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
- __ JumpIfNotSmi(key_reg, &miss_force_generic);
-
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis; the unsigned compare catches both negative and
- // out-of-bounds indexes.
- __ cmp(key_reg, scratch1);
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- receiver_reg,
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
- __ Ret();
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is growing by only a single element; anything
- // else must be handled by the runtime. The flags were already set by
- // the previous compare.
- __ b(ne, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray. Leave the elements uninitialized
- // for efficiency; they are guaranteed to be initialized before use.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch1,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ jmp(&finish_store);
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ cmp(length_reg, scratch1);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
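-
- // Both grow paths above only handle an append at key == length (the
- // flags from the bounds compare send any other out-of-range key to the
- // generic stub) and only while the backing store has spare capacity;
- // everything else defers to the runtime. A minimal model of that
- // policy; all names below are illustrative only.
- enum class StoreOutcome { kStored, kDeferToRuntime };
-
- StoreOutcome TryGrowAndStore(double* backing, int* length, int capacity,
-                              int key, double value) {
-   if (key != *length) return StoreOutcome::kDeferToRuntime;  // Not an append.
-   if (*length >= capacity) return StoreOutcome::kDeferToRuntime;  // Full.
-   backing[key] = value;  // Store the element, then bump the length.
-   ++*length;
-   return StoreOutcome::kStored;
- }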
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM