Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index b0b22b6..52ebe32 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -71,11 +71,6 @@
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -118,19 +113,6 @@
}
}
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -276,7 +258,7 @@
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 62516e8..1ccc3a6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -141,15 +141,6 @@
}
if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
-
- if (cpu.implementer() == base::CPU::NVIDIA &&
- cpu.variant() == base::CPU::NVIDIA_DENVER &&
- cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- // TODO(jkummerow): This is turned off as an experiment to see if it
- // affects crash rates. Keep an eye on crash reports and either remove
- // coherent cache support permanently, or re-enable it!
- // supported_ |= 1u << COHERENT_CACHE;
- }
#endif
DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
@@ -212,18 +203,14 @@
void CpuFeatures::PrintFeatures() {
printf(
- "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
- "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
- CpuFeatures::IsSupported(ARMv8),
- CpuFeatures::IsSupported(ARMv7),
- CpuFeatures::IsSupported(VFP3),
- CpuFeatures::IsSupported(VFP32DREGS),
- CpuFeatures::IsSupported(NEON),
- CpuFeatures::IsSupported(SUDIV),
- CpuFeatures::IsSupported(MLS),
- CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
- CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
- CpuFeatures::IsSupported(COHERENT_CACHE));
+ "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
+ "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d",
+ CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
+ CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(MLS),
+ CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
+ CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
@@ -255,6 +242,42 @@
return Assembler::is_constant_pool_load(pc_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+ if (IsWasmMemoryReference(rmode_)) {
+ Address updated_memory_reference;
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_memory_reference &&
+ updated_memory_reference < new_base + new_size);
+ Assembler::set_target_address_at(
+ isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+ } else if (IsWasmMemorySizeReference(rmode_)) {
+ uint32_t updated_size_reference;
+ DCHECK(wasm_memory_size_reference() <= old_size);
+ updated_size_reference =
+ new_size + (wasm_memory_size_reference() - old_size);
+ DCHECK(updated_size_reference <= new_size);
+ Assembler::set_target_address_at(
+ isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
+ icache_flush_mode);
+ } else {
+ UNREACHABLE();
+ }
+}
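Reviewer note: the relocation arithmetic above preserves an offset for memory references and a delta for size references. A minimal stand-alone sketch in plain C++ (hypothetical helper names, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // A memory reference keeps its offset from the memory base.
    uint32_t RebaseMemoryReference(uint32_t ref, uint32_t old_base,
                                   uint32_t new_base, uint32_t old_size,
                                   uint32_t new_size) {
      assert(old_base <= ref && ref < old_base + old_size);
      uint32_t updated = new_base + (ref - old_base);
      assert(new_base <= updated && updated < new_base + new_size);
      return updated;
    }

    // A size reference keeps its distance below the memory size.
    uint32_t RebaseSizeReference(uint32_t ref, uint32_t old_size,
                                 uint32_t new_size) {
      assert(ref <= old_size);
      uint32_t updated = new_size + (ref - old_size);
      assert(updated <= new_size);
      return updated;
    }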
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
@@ -1702,8 +1725,6 @@
int satpos,
const Operand& src,
Condition cond) {
- // v6 and above.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
DCHECK(!dst.is(pc) && !src.rm_.is(pc));
DCHECK((satpos >= 0) && (satpos <= 31));
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -2038,7 +2059,6 @@
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- DCHECK(IsEnabled(ARMv7));
DCHECK(src.rm().is(no_reg));
DCHECK(!dst1.is(lr)); // r14.
DCHECK_EQ(0, dst1.code() % 2);
@@ -2053,7 +2073,6 @@
DCHECK(!src1.is(lr)); // r14.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
- DCHECK(IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
@@ -3371,6 +3390,69 @@
0x5 * B9 | B6);
}
+void Assembler::vsel(Condition cond, const DwVfpRegister dst,
+ const DwVfpRegister src1, const DwVfpRegister src2) {
+ // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
+ // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
+ // 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = 1;
+
+ // VSEL has a special (restricted) condition encoding.
+ // eq(0b0000)... -> 0b00
+ // ge(0b1010)... -> 0b10
+ // gt(0b1100)... -> 0b11
+ // vs(0b0110)... -> 0b01
+ // No other conditions are supported.
+ int vsel_cond = (cond >> 30) & 0x3;
+ if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
+ // We can implement some other conditions by swapping the inputs.
+ DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
+ std::swap(vn, vm);
+ std::swap(n, m);
+ }
+
+ emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
+ vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vsel(Condition cond, const SwVfpRegister dst,
+ const SwVfpRegister src1, const SwVfpRegister src2) {
+ // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
+ // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
+ // 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = 0;
+
+ // VSEL has a special (restricted) condition encoding.
+ // eq(0b0000)... -> 0b00
+ // ge(0b1010)... -> 0b10
+ // gt(0b1100)... -> 0b11
+ // vs(0b0110)... -> 0b01
+ // No other conditions are supported.
+ int vsel_cond = (cond >> 30) & 0x3;
+ if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
+ // We can implement some other conditions by swapping the inputs.
+ DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
+ std::swap(vn, vm);
+ std::swap(n, m);
+ }
+
+ emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
+ vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
+}
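Reviewer note: the restricted VSEL encoding works because the four supported conditions differ from their inverses only in the low condition bit, so the top two bits `(cond >> 30) & 0x3` select the encoding and the inverse cases reuse it with swapped operands. A quick stand-alone check (assuming V8's convention that Condition values occupy bits 31-28):

    #include <cassert>
    #include <cstdint>

    int main() {
      auto vsel_cond = [](uint32_t cond4) {    // cond4 = raw 4-bit code
        return ((cond4 << 28) >> 30) & 0x3;    // as in the emitters above
      };
      assert(vsel_cond(0x0) == 0x0);  // eq -> vseleq
      assert(vsel_cond(0x6) == 0x1);  // vs -> vselvs
      assert(vsel_cond(0xA) == 0x2);  // ge -> vselge
      assert(vsel_cond(0xC) == 0x3);  // gt -> vselgt
      // Inverses land on the same encoding; the emitters swap src1/src2.
      assert(vsel_cond(0x1) == vsel_cond(0x0));  // ne vs. eq
      assert(vsel_cond(0xB) == vsel_cond(0xA));  // lt vs. ge
      return 0;
    }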
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 08ad64c..26e062b 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -57,6 +57,12 @@
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
+#define FLOAT_REGISTERS(V) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
+ V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+ V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+ V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
#define DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
@@ -154,6 +160,10 @@
DCHECK(is_valid());
return 1 << reg_code;
}
+ static SwVfpRegister from_code(int code) {
+ SwVfpRegister r = {code};
+ return r;
+ }
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
*m = reg_code & 0x1;
@@ -163,9 +173,10 @@
int reg_code;
};
+typedef SwVfpRegister FloatRegister;
// Double word VFP register.
-struct DoubleRegister {
+struct DwVfpRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -187,7 +198,7 @@
const char* ToString();
bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
@@ -197,8 +208,8 @@
return 1 << reg_code;
}
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
+ static DwVfpRegister from_code(int code) {
+ DwVfpRegister r = {code};
return r;
}
void split_code(int* vm, int* m) const {
@@ -211,7 +222,7 @@
};
-typedef DoubleRegister DwVfpRegister;
+typedef DwVfpRegister DoubleRegister;
// Double word VFP register d0-15.
@@ -1225,6 +1236,17 @@
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
+
+ // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
+ void vsel(const Condition cond,
+ const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2);
+ void vsel(const Condition cond,
+ const SwVfpRegister dst,
+ const SwVfpRegister src1,
+ const SwVfpRegister src2);
+
void vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
@@ -1357,7 +1379,7 @@
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(const int reason, int raw_position, int id);
// Record the emission of a constant pool.
//
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 1fffcb6..031b483 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -604,16 +604,9 @@
// r0: number of arguments
// r1: constructor function
// r3: new target
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -704,6 +697,140 @@
Generate_JSConstructStubHelper(masm, false, false, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the value to pass to the generator
+ // -- r1 : the JSGeneratorObject to resume
+ // -- r2 : the resume mode (tagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(r1);
+
+ // Store input value into generator object.
+ __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
+ __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r0, r3,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(masm->isolate());
+ __ mov(ip, Operand(step_in_enabled));
+ __ ldrb(ip, MemOperand(ip));
+ __ cmp(ip, Operand(0));
+ __ b(eq, &skip_flooding);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r2, r4);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(r1, r2);
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+ }
+ __ bind(&skip_flooding);
+
+ // Push receiver.
+ __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ Push(ip);
+
+ // ----------- S t a t e -------------
+ // -- r1 : the JSGeneratorObject to resume
+ // -- r2 : the resume mode (tagged)
+ // -- r4 : generator function
+ // -- cp : generator context
+ // -- lr : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+ __ b(mi, &done_loop);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+ __ b(ne, &old_generator);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0,
+ FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(r0);
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(r3, r1);
+ __ Move(r1, r4);
+ __ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Jump(r5);
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(lr, fp);
+ __ Move(fp, sp);
+ __ Push(cp, r4);
+
+ // Restore the operand stack.
+ __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+ __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ cmp(r0, r3);
+ __ b(eq, &done_loop);
+ __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
+ __ Push(ip);
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+ __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+
+ // Resume the generator function at the continuation.
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ add(r3, r3, Operand(r2, ASR, 1));
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Move(r0, r1); // Continuation expects generator object in r0.
+ __ Jump(r3);
+ }
+}
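Reviewer note: in the old-style path the resume address is the code entry plus the untagged continuation offset; `ASR #1` untags the Smi in the same instruction that adds it. A hedged sketch of that arithmetic (helper and parameter names are illustrative only):

    #include <cstdint>

    uintptr_t ResumeAddress(uintptr_t code_object, int32_t continuation_smi,
                            int code_header_size /* Code::kHeaderSize */) {
      const int kHeapObjectTag = 1;
      uintptr_t entry = code_object + code_header_size - kHeapObjectTag;
      return entry + (continuation_smi >> 1);  // ASR #1 == SmiUntag
    }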
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -832,7 +959,6 @@
Generate_JSEntryTrampolineHelper(masm, true);
}
-
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -850,14 +976,16 @@
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r1);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(r0));
@@ -869,8 +997,12 @@
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex), ne);
+ // Check function data field is actually a BytecodeArray object.
+ Label bytecode_array_not_present;
+ __ CompareRoot(kInterpreterBytecodeArrayRegister,
+ Heap::kUndefinedValueRootIndex);
+ __ b(eq, &bytecode_array_not_present);
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
@@ -878,8 +1010,12 @@
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ mov(r0, Operand(0));
+ // Load the initial bytecode offset.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
__ Push(r3, kInterpreterBytecodeArrayRegister, r0);
// Allocate the local and temporary register file on the stack.
@@ -911,18 +1047,8 @@
__ b(&loop_header, ge);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ add(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ mov(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -932,35 +1058,33 @@
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
-}
+ // The return value is in r0.
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
-
- // The return value is in accumulator, which is already in r0.
+ // Get the arguments + receiver count.
+ __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ ldr(r2, FieldMemOperand(r2, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments and return.
- __ ldr(ip, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ add(sp, sp, ip, LeaveCC);
+ __ add(sp, sp, r2, LeaveCC);
__ Jump(lr);
-}
+ // If the bytecode array is no longer present, then the underlying function
+ // has been switched to a different kind of code and we heal the closure by
+ // switching the code entry field over to the new code object as well.
+ __ bind(&bytecode_array_not_present);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(r1, r4, r5);
+ __ Jump(r4);
+}
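Reviewer note: the closure-healing tail above can be read as: if the bytecode array has been replaced, copy the SharedFunctionInfo's current code into the closure's code entry and jump there. A rough plain-C++ mirror (hypothetical struct layout, write barrier elided):

    #include <cstdint>

    struct Code { /* header, then instructions */ };
    struct SharedFunctionInfo { Code* code; };
    struct JSFunction { SharedFunctionInfo* shared; uint8_t* code_entry; };

    void HealClosure(JSFunction* fn, int code_header_size) {
      fn->code_entry =
          reinterpret_cast<uint8_t*>(fn->shared->code) + code_header_size;
      // The generated code also emits a write barrier here
      // (RecordWriteCodeEntryField) before jumping to the new entry.
    }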
static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
Register limit, Register scratch) {
@@ -974,7 +1098,6 @@
__ b(gt, &loop_header);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -1000,7 +1123,6 @@
RelocInfo::CODE_TARGET);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1025,25 +1147,24 @@
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ Move(r2, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ add(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ ldr(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ ldr(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1056,9 +1177,7 @@
// Get the target bytecode offset from the frame.
__ ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1066,63 +1185,169 @@
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(pc, ip);
}
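Reviewer note: dispatch is now a plain table load; the table stores code entry addresses directly, so the old `Code::kHeaderSize - kHeapObjectTag` adjustment is gone. A sketch of the step (names are illustrative):

    #include <cstdint>

    using Handler = const void*;  // raw code entry, not a tagged Code object

    Handler Dispatch(Handler* dispatch_table, const uint8_t* bytecode_array,
                     int offset) {
      uint8_t bytecode = bytecode_array[offset];
      return dispatch_table[bytecode];  // no header/tag arithmetic needed
    }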
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -- r1 : target function (preserved for callee)
+ // -----------------------------------
+ // First lookup code, maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register argument_count = r0;
+ Register closure = r1;
+ Register new_target = r3;
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = argument_count;
+ Register index = r2;
+ __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(map,
+ FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ cmp(index, Operand(Smi::FromInt(2)));
+ __ b(lt, &gotta_call_runtime);
+
+ // Find literals.
+ // r3 : native context
+ // r2 : length / index
+ // r0 : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = r3;
+ __ ldr(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = r1;
+ Register array_pointer = r5;
+
+ // Does the native context match?
+ __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
+ __ ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ cmp(temp, native_context);
+ __ b(ne, &loop_bottom);
+ // OSR id set to none?
+ __ ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
+ __ b(ne, &loop_bottom);
+ // Literals available?
+ __ ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ ldr(r4, MemOperand(sp, 0));
+ __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ __ push(index);
+ __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = r4;
+ __ ldr(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, r5);
+
+ // Link the closure into the optimized function list.
+ // r4 : code entry
+ // r3 : native context
+ // r1 : closure
+ __ ldr(r5,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ str(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(r5, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, r5);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ Jump(entry);
+
+ __ bind(&loop_bottom);
+ __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(index, Operand(Smi::FromInt(1)));
+ __ b(gt, &loop_top);
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context free optimized code map entry.
+ __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ ldr(entry,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ and_(r5, r5, Operand(Code::KindField::kMask));
+ __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
+ __ cmp(r5, Operand(Code::BUILTIN));
+ __ b(eq, &gotta_call_runtime_no_stack);
+ // Yes, install the full code.
+ __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, r5);
+ __ Jump(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
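Reviewer note: the lookup above walks the SharedFunctionInfo's optimized code map from the back, accepting an entry only if its native context matches and its OSR id is none, then requires live literals and code. A hedged high-level mirror (struct and function names hypothetical; WeakCell/Smi handling and the context-free fallback slot simplified away):

    struct Entry {                // one optimized-code-map entry, simplified
      void* native_context;      // from a WeakCell
      int osr_ast_id;            // only BailoutId::None() entries are usable
      void* literals;            // nullptr if the WeakCell was cleared
      void* code;                // nullptr if the WeakCell was cleared
    };

    // Returns cached code for |context|; nullptr means fall back to the
    // shared slots or Runtime::kCompileLazy, as the builtin does.
    void* FindCachedCode(const Entry* entries, int count, void* context,
                         int bailout_id_none) {
      for (int i = count - 1; i >= 0; i--) {  // scan from the back
        const Entry& e = entries[i];
        if (e.native_context != context) continue;
        if (e.osr_ast_id != bailout_id_none) continue;
        if (e.literals == nullptr) return nullptr;  // gotta call runtime
        return e.code;  // may be cleared: caller then tries shared code
      }
      return nullptr;  // found neither literals nor code
    }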
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
@@ -1252,14 +1477,17 @@
__ SmiUntag(r6);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ cmp(r6,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ b(ne, &with_tos_register);
__ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
+ __ cmp(r6,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ b(ne, &unknown_state);
__ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
__ Ret();
@@ -1474,28 +1702,6 @@
}
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[4] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ ldr(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ ldr(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ Ret(2);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -2387,6 +2593,30 @@
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r1);
+ __ Push(r1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r1);
+ __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(r1, r2);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
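Reviewer note: both builtins Smi-tag the requested size before pushing it, since the runtime expects tagged arguments. On 32-bit ARM a Smi is just the value shifted left by one (sketch, assuming 32-bit Smis):

    #include <cassert>
    #include <cstdint>

    inline int32_t SmiTag(int32_t value) { return value << 1; }
    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }

    int main() {
      int32_t requested = 64;  // untagged object size in r1
      assert(SmiUntag(SmiTag(requested)) == requested);
      return 0;
    }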
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 31e3e95..0224f9d 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -55,12 +55,6 @@
}
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
void ArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -73,11 +67,6 @@
}
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1072,8 +1061,6 @@
}
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
- __ VFPEnsureFPSCRState(r3);
-
// Check result for exception sentinel.
Label exception_returned;
__ CompareRoot(r0, Heap::kExceptionRootIndex);
@@ -1183,7 +1170,6 @@
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, 0.0);
- __ VFPEnsureFPSCRState(r4);
// Get address of argv, see stm above.
// r0: code entry
@@ -1333,126 +1319,6 @@
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = r1; // Object (lhs).
- Register const function = r0; // Function (rhs).
- Register const object_map = r2; // Map of {object}.
- Register const function_map = r3; // Map of {function}.
- Register const function_prototype = r4; // Prototype of {function}.
- Register const scratch = r5;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ b(ne, &fast_case);
- __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
- __ b(ne, &fast_case);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow_case);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ Ret();
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsConstructor));
- __ b(eq, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ ldr(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
- __ b(ne, &function_prototype_valid);
- __ ldr(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = r0;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &fast_runtime_fallback);
- // Check if the current object is a Proxy.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- __ b(eq, &fast_runtime_fallback);
-
- __ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object, function_prototype);
- __ b(eq, &done);
- __ cmp(object, null);
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ b(ne, &loop);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // Found Proxy or access check needed: Call the runtime
- __ bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- __ Move(scratch, Smi::FromInt(0));
- __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -3227,7 +3093,6 @@
// GC safe. The RegExp backend also relies on this.
__ str(lr, MemOperand(sp, 0));
__ blx(ip); // Call the C++ function.
- __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
@@ -3839,8 +3704,8 @@
__ bind(¬_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
+ Code::Flags code_flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
receiver, name, feedback,
receiver_map, scratch1, r9);
@@ -3984,8 +3849,8 @@
__ bind(¬_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
+ Code::Flags code_flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
scratch1, scratch2);
@@ -4562,15 +4427,15 @@
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ str(r2, MemOperand(r0, JSObject::kMapOffset));
+ __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, MemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, MemOperand(r0, JSObject::kElementsOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ add(r1, r0, Operand(JSObject::kHeaderSize));
+ __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- r0 : result (untagged)
+ // -- r0 : result (tagged)
// -- r1 : result fields (untagged)
// -- r5 : result end (untagged)
// -- r2 : initial map
@@ -4588,10 +4453,6 @@
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(r1, r5, r6);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ add(r0, r0, Operand(kHeapObjectTag));
__ Ret();
}
__ bind(&slack_tracking);
@@ -4610,10 +4471,6 @@
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(r1, r5, r6);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ add(r0, r0, Operand(kHeapObjectTag));
-
// Check if we can finalize the instance size.
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ tst(r3, Operand(Map::ConstructionCounter::kMask));
@@ -4640,10 +4497,10 @@
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(r2);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ sub(r0, r0, Operand(kHeapObjectTag));
__ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ sub(r5, r5, Operand(kHeapObjectTag));
__ b(&done_allocate);
// Fall back to %NewObject.
@@ -4662,20 +4519,20 @@
// -----------------------------------
__ AssertFunction(r1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(r2, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r2 point to the JavaScript frame.
+ __ mov(r2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
- __ b(ne, &loop);
+ __ b(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
@@ -4706,7 +4563,7 @@
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in r0.
@@ -4748,7 +4605,7 @@
Label allocate, done_allocate;
__ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
__ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r3.
@@ -4804,23 +4661,40 @@
// -----------------------------------
__ AssertFunction(r1);
+ // Make r9 point to the JavaScript frame.
+ __ mov(r9, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
+ __ cmp(ip, r1);
+ __ b(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ add(r3, fp, Operand(r2, LSL, kPointerSizeLog2 - 1));
+ __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
// r1 : function
// r2 : number of parameters (tagged)
// r3 : parameters pointer
+ // r9 : JavaScript frame pointer
// Registers used over whole function:
// r5 : arguments count (tagged)
// r6 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
__ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
@@ -4863,7 +4737,7 @@
__ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
@@ -5009,20 +4883,20 @@
// -----------------------------------
__ AssertFunction(r1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(r2, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r2 point to the JavaScript frame.
+ __ mov(r2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
- __ b(ne, &loop);
+ __ b(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5060,7 +4934,7 @@
Label allocate, done_allocate;
__ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
__ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r3.
@@ -5423,7 +5297,11 @@
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// context save
__ push(context);
@@ -5457,7 +5335,7 @@
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -5474,9 +5352,6 @@
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc()));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5493,8 +5368,8 @@
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
- MemOperand* stack_space_operand = &is_construct_call_operand;
+ MemOperand length_operand = MemOperand(sp, 3 * kPointerSize);
+ MemOperand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
@@ -5505,16 +5380,36 @@
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- r2 : api_function_address
- // -----------------------------------
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(r2));
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+ Register api_function_address = r2;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch, scratch);
+ __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch, holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
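Reviewer note: the pushes above build args_ from kThisIndex down to index 0; the stack grows downward, so the last push lands at the lowest index. A small consistency check of that ordering against the asserted indices:

    #include <cassert>

    enum {  // mirrors the STATIC_ASSERTs above
      kShouldThrowOnErrorIndex = 0, kHolderIndex = 1, kIsolateIndex = 2,
      kReturnValueDefaultValueIndex = 3, kReturnValueOffset = 4,
      kDataIndex = 5, kThisIndex = 6, kArgsLength = 7
    };

    int main() {
      // The i-th push (0-based) ends up at args_[kArgsLength - 1 - i].
      assert(kArgsLength - 1 - 0 == kThisIndex);                // receiver
      assert(kArgsLength - 1 - 1 == kDataIndex);                // data
      assert(kArgsLength - 1 - 6 == kShouldThrowOnErrorIndex);  // Smi(0)
      return 0;
    }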
@@ -5534,6 +5429,10 @@
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ ldr(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5541,7 +5440,6 @@
kStackUnwindSpace, NULL, return_value_operand, NULL);
}
-
#undef __
} // namespace internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7e1a550..4014aba 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -450,6 +450,7 @@
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+ __ sub(array, array, Operand(kHeapObjectTag));
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
@@ -594,11 +595,13 @@
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, not tagged as heap object
+ // array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ str(scratch, MemOperand(array, HeapObject::kMapOffset));
+ __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
+
+ __ sub(array, array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
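Reviewer note: with Allocate() now returning a tagged pointer, the transitions above either untag explicitly (subtract kHeapObjectTag) or switch plain MemOperand stores to FieldMemOperand, which folds the tag into the displacement. A sketch of the equivalence (helper names hypothetical):

    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;

    // FieldAddress(tagged, off) names the same byte that a raw store at
    // (tagged - kHeapObjectTag) + off would hit, i.e. FieldMemOperand
    // versus untag-then-MemOperand.
    uint8_t* FieldAddress(uint8_t* tagged_base, intptr_t offset) {
      return tagged_base + offset - kHeapObjectTag;
    }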
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 287152a..20a898e 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1869,6 +1869,48 @@
Unknown(instr);
}
break;
+ case 0x1C:
+ if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ // VSEL* (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ switch (instr->Bits(21, 20)) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
default:
Unknown(instr);
break;
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index b6cac76..4e8c95c 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -48,16 +48,11 @@
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
-const Register InstanceOfDescriptor::LeftRegister() { return r1; }
-const Register InstanceOfDescriptor::RightRegister() { return r0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
-
-const Register ApiGetterDescriptor::function_address() { return r2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return r0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }
@@ -70,6 +65,8 @@
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+const Register HasPropertyDescriptor::ObjectRegister() { return r0; }
+const Register HasPropertyDescriptor::KeyRegister() { return r3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -248,13 +245,16 @@
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // register state
+ // r0 -- number of arguments
+ // r1 -- function
+ // r2 -- allocation site with elements kind
+ Register registers[] = {r1, r2, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -318,6 +318,11 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -398,9 +403,8 @@
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -435,6 +439,16 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r0, // the value to pass to the generator
+ r1, // the JSGeneratorObject to resume
+ r2 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6af3d6c..d723251 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -355,37 +355,6 @@
}
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
- Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- DCHECK(!dst.is(pc) && !src.rm().is(pc));
- DCHECK((satpos >= 0) && (satpos <= 31));
-
- // These asserts are required to ensure compatibility with the ARMv7
- // implementation.
- DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
- DCHECK(src.rs().is(no_reg));
-
- Label done;
- int satval = (1 << satpos) - 1;
-
- if (cond != al) {
- b(NegateCondition(cond), &done); // Skip saturate if !condition.
- }
- if (!(src.is_reg() && dst.is(src.rm()))) {
- mov(dst, src);
- }
- tst(dst, Operand(~satval));
- b(eq, &done);
- mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
- mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
- bind(&done);
- } else {
- usat(dst, satpos, src, cond);
- }
-}
-
-
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@@ -889,10 +858,8 @@
// below doesn't support it yet.
DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
- // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
- (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
- CpuFeatureScope scope(this, ARMv7);
+ // Generate two ldr instructions if ldrd is not applicable.
+ if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
ldrd(dst1, dst2, src, cond);
} else {
if ((src.am() == Offset) || (src.am() == NegOffset)) {
@@ -930,10 +897,8 @@
// below doesn't support it yet.
DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
- // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
- (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
- CpuFeatureScope scope(this, ARMv7);
+ // Generate two str instructions if strd is not applicable.
+ if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
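Both hunks above drop the runtime ARMv7/predictable-code-size gate; what remains is the architectural operand constraint on ldrd/strd. A one-line predicate capturing it (hypothetical helper):

// ldrd/strd require an even-numbered first register and the consecutive
// pair, e.g. (r4, r5) is encodable but (r5, r6) falls back to two loads.
inline bool IsLdrdStrdPair(int code1, int code2) {
  return (code1 % 2 == 0) && (code2 == code1 + 1);
}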
@@ -950,30 +915,12 @@
}
}
-
-void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
- // If needed, restore wanted bits of FPSCR.
- Label fpscr_done;
- vmrs(scratch);
- if (emit_debug_code()) {
- Label rounding_mode_correct;
- tst(scratch, Operand(kVFPRoundingModeMask));
- b(eq, &rounding_mode_correct);
- // Don't call Assert here, since Runtime_Abort could re-enter here.
- stop("Default rounding mode not set");
- bind(&rounding_mode_correct);
- }
- tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
- b(ne, &fpscr_done);
- orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
- vmsr(scratch);
- bind(&fpscr_done);
-}
-
-
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
vsub(dst, src, kDoubleRegZero, cond);
}
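The new comment documents why vsub is used; the two properties it relies on can be checked on any IEEE-754 host. A small demonstration, assuming round-to-nearest:

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  // vadd-style would lose the sign of zero; vsub-style preserves it.
  std::printf("-0.0 + 0.0 = %g\n", -0.0 + 0.0);  // prints 0
  std::printf("-0.0 - 0.0 = %g\n", -0.0 - 0.0);  // prints -0
  // Arithmetic on a NaN always yields a quiet NaN, so "x - 0.0"
  // canonicalizes signalling NaNs and leaves all other values unchanged.
  double nan = std::numeric_limits<double>::quiet_NaN();
  std::printf("isnan(nan - 0.0) = %d\n", std::isnan(nan - 0.0));
  return 0;
}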
@@ -2003,6 +1950,7 @@
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2090,26 +2038,29 @@
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(result_end, source, bits_operand, SetCC, cond);
+ add(result_end, source, bits_operand, LeaveCC, cond);
source = result_end;
cond = cc;
}
}
- b(cs, gc_required);
+
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
- str(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2185,7 +2136,7 @@
} else {
add(result_end, result, Operand(object_size), SetCC);
}
- b(cs, gc_required);
+
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
@@ -2194,14 +2145,122 @@
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
- str(result_end, MemOperand(top_address));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ add(result, result, Operand(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ Register top_address = scratch;
+ mov(top_address, Operand(allocation_top));
+ ldr(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
+ } else {
+ add(result_end, result, Operand(object_size), SetCC);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ tst(result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+  // Unlike in Allocate, top is always updated here: an allocation folding
+  // dominator leaves top unchanged, so each folded allocation must bump it.
+  str(result_end, MemOperand(top_address));
+
+ add(result, result, Operand(kHeapObjectTag));
+}
+
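The DOUBLE_ALIGNMENT block above aligns top by plugging a stray word with a filler object rather than leaving a gap the GC could not parse. A host-side model of that step, with constants as on 32-bit ARM:

#include <cstdint>

// Assumed 32-bit ARM constants: kPointerSize == 4, kDoubleAlignment == 8.
constexpr uintptr_t kDoubleAlignmentMask = 7;

// Returns the aligned allocation start; writes one filler word if needed so
// the heap stays iterable (mirrors the one_pointer_filler_map store above).
uintptr_t AlignForDouble(uintptr_t top, void (*write_filler_map)(uintptr_t)) {
  if ((top & kDoubleAlignmentMask) != 0) {
    write_filler_map(top);  // a one-word filler object at the odd word
    top += 4;               // kDoubleSize / 2, as in the PostIndex store
  }
  return top;
}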
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+ ldr(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
+  // Calculate new top using result. Object size may be in words, so a shift
+  // is required to get the number of bytes. We must preserve ip here, so we
+  // cannot just add() an arbitrary immediate (materializing it may need ip
+  // as a scratch); the constant is split into single-instruction chunks.
+ DCHECK(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ DCHECK(bits_operand.instructions_required(this) == 1);
+ add(result_end, source, bits_operand, LeaveCC, cond);
+ source = result_end;
+ cond = cc;
+ }
+ }
+
+  // Unlike in Allocate, top is always updated here: an allocation folding
+  // dominator leaves top unchanged, so each folded allocation must bump it.
+  str(result_end, MemOperand(top_address));
+
+ add(result, result, Operand(kHeapObjectTag));
+}
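The while loop above splits the constant object_size into chunks that each fit a single ARM data-processing immediate, since materializing one large constant could clobber ip. A host-side trace of the decomposition, assuming the same 8-bit-chunk strategy:

#include <cstdio>

int main() {
  int object_size = 0x12340;  // hypothetical byte size
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // skip zero bit-pairs (ARM immediates rotate by 2)
    } else {
      int bits = object_size & (0xff << shift);
      object_size -= bits;
      shift += 8;
      std::printf("add 0x%x\n", bits);  // one single-instruction add each
    }
  }
  return 0;  // for 0x12340 this prints: add 0x2340, add 0x10000
}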
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -2218,12 +2277,8 @@
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -2247,12 +2302,8 @@
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -2266,7 +2317,7 @@
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -2280,12 +2331,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2298,7 +2345,7 @@
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -2314,7 +2361,7 @@
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2414,12 +2461,6 @@
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
- if (emit_debug_code()) {
- vmrs(ip);
- tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- Assert(ne, kDefaultNaNModeNotSet);
- }
VFPCanonicalizeNaN(double_scratch);
b(&store);
@@ -3129,6 +3170,17 @@
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+ push(object);
+ CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -3225,12 +3277,11 @@
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -3238,11 +3289,7 @@
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -3267,7 +3314,8 @@
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3662,7 +3710,7 @@
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Usat(output_reg, 8, Operand(input_reg));
+ usat(output_reg, 8, Operand(input_reg));
}
@@ -3770,7 +3818,7 @@
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3780,7 +3828,9 @@
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ ldr(ip, MemOperand(ip));
+ eor(scratch_reg, scratch_reg, Operand(ip));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
@@ -3796,7 +3846,9 @@
// we are below top.
bind(&top_check);
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- cmp(scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ ldr(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
b(gt, no_memento_found);
// Memento map check.
bind(&map_check);
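The memento check above now dereferences the allocation-top address instead of comparing against the ExternalReference constant itself; the surrounding eor/tst sequence tests whether two addresses share a page. A host-side model of that test, with an assumed page-alignment mask:

#include <cstdint>

// Two addresses lie on the same aligned page iff their XOR has no bits set
// outside the page-offset mask (the width of kPageAlignmentMask is assumed).
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;

inline bool OnSamePage(uintptr_t a, uintptr_t b) {
  return ((a ^ b) & ~kPageAlignmentMask) == 0;
}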
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index f326304..8fa197c 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -19,8 +19,8 @@
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
+const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
@@ -157,8 +157,6 @@
int width,
Condition cond = al);
void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
- void Usat(Register dst, int satpos, const Operand& src,
- Condition cond = al);
void Call(Label* target);
void Push(Register src) { push(src); }
@@ -489,15 +487,6 @@
const MemOperand& dst,
Condition cond = al);
- // Ensure that FPSCR contains values needed by JavaScript.
- // We need the NaNModeControlBit to be sure that operations like
- // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
- // In VFP3 it will be always the Canonical NaN.
- // In VFP2 it will be either the Canonical NaN or the negative version
- // of the Canonical NaN. It doesn't matter if we have two values. The aim
- // is to be sure to never generate the hole NaN.
- void VFPEnsureFPSCRState(Register scratch);
-
// If the value is a NaN, canonicalize the value else, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
@@ -792,6 +781,15 @@
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+  // FastAllocate is currently used only for folded allocations. It just
+  // increments the top pointer without checking against the limit. This is
+  // only valid if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ Register scratch, AllocationFlags flags);
+
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
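The FastAllocate comment above states the folding contract that the macro-assembler changes implement: the dominator performs one limit check for the whole group without moving top, and each folded allocation then bumps top unchecked. A host-side model under that assumption:

#include <cstdint>

struct Heap { uintptr_t top, limit; };

// Allocate(..., ALLOCATION_FOLDING_DOMINATOR): one limit check for the whole
// group; top is deliberately left unchanged.
bool ReserveGroup(const Heap& h, uintptr_t total_size) {
  return h.top + total_size <= h.limit;  // else: take the gc_required path
}

// FastAllocate: bump top with no limit check; safe only inside a reservation
// proved by a dominating Allocate.
uintptr_t FastAllocate(Heap* h, uintptr_t size) {
  uintptr_t result = h->top;
  h->top += size;
  return result + 1;  // + kHeapObjectTag
}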
@@ -826,7 +824,6 @@
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
@@ -1326,6 +1323,10 @@
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6c22a0a..1a870c5 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -387,7 +387,7 @@
end = cur + words;
while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
+ PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
@@ -449,8 +449,8 @@
while (cur < end) {
prev = cur;
cur += dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(prev), buffer.start());
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
+ buffer.start());
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@@ -1271,7 +1271,7 @@
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+ PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
}
@@ -4028,6 +4028,45 @@
UNIMPLEMENTED();
}
break;
+ case 0x1C:
+ if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ // VSEL* (floating-point)
+ bool condition_holds;
+ switch (instr->Bits(21, 20)) {
+ case 0x0: // VSELEQ
+ condition_holds = (z_flag_ == 1);
+ break;
+ case 0x1: // VSELVS
+ condition_holds = (v_flag_ == 1);
+ break;
+ case 0x2: // VSELGE
+ condition_holds = (n_flag_ == v_flag_);
+ break;
+ case 0x3: // VSELGT
+ condition_holds = ((z_flag_ == 0) && (n_flag_ == v_flag_));
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ if (instr->SzValue() == 0x1) {
+ int n = instr->VFPNRegValue(kDoublePrecision);
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
+ double result = get_double_from_d_register(condition_holds ? n : m);
+ set_d_register_from_double(d, result);
+ } else {
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float result = get_float_from_s_register(condition_holds ? n : m);
+ set_s_register_from_float(d, result);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
default:
UNIMPLEMENTED();
break;
@@ -4048,7 +4087,8 @@
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);