Upgrade V8 to 5.1.281.57 DO NOT MERGE
FPIIM-449
Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 0de9642..b0b22b6 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -71,6 +71,10 @@
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -114,6 +118,18 @@
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index b0fa462..62516e8 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -145,7 +145,10 @@
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- supported_ |= 1u << COHERENT_CACHE;
+ // TODO(jkummerow): This is turned off as an experiment to see if it
+ // affects crash rates. Keep an eye on crash reports and either remove
+ // coherent cache support permanently, or re-enable it!
+ // supported_ |= 1u << COHERENT_CACHE;
}
#endif
@@ -1966,7 +1969,8 @@
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
- DCHECK(fields >= B16 && fields < B20); // at least one field set
+ DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
+ DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr;
if (!src.rm_.is_valid()) {
// Immediate.
@@ -2546,12 +2550,6 @@
}
-void Assembler::vmov(const SwVfpRegister dst, float imm) {
- mov(ip, Operand(bit_cast<int32_t>(imm)));
- vmov(dst, ip);
-}
-
-
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -2563,7 +2561,7 @@
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
+static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
DCHECK(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
@@ -2592,12 +2590,12 @@
return false;
}
- // Bits 62:55 must be all clear or all set.
+ // Bits 61:54 must be all clear or all set.
if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
return false;
}
- // Bit 63 must be NOT bit 62.
+ // Bit 62 must be NOT bit 61.
if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
return false;
}
@@ -2612,6 +2610,25 @@
}
+void Assembler::vmov(const SwVfpRegister dst, float imm) {
+ uint32_t enc;
+ if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+ // The float can be encoded in the instruction.
+ //
+ // Sd = immediate
+ // Instruction details available in ARM DDI 0406C.b, A8-936.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=0(8) | imm4L(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
+ } else {
+ mov(ip, Operand(bit_cast<int32_t>(imm)));
+ vmov(dst, ip);
+ }
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
@@ -2622,7 +2639,7 @@
// pointer (pp) is valid.
bool can_use_pool =
!FLAG_enable_embedded_constant_pool || is_constant_pool_available();
- if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
//
// Dd = immediate
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d381653..08ad64c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1390,7 +1390,9 @@
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -1637,8 +1639,8 @@
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index a6bfdb1..1fffcb6 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -531,6 +531,7 @@
// -- r1 : constructor function
// -- r2 : allocation site or undefined
// -- r3 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -543,6 +544,7 @@
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r2, r4);
+ __ Push(cp);
__ SmiTag(r0);
__ Push(r2, r0);
@@ -622,7 +624,7 @@
// r0: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -751,9 +753,6 @@
// r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand::Zero());
-
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -855,8 +854,7 @@
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushFixedFrame(r1);
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r1);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -1192,8 +1190,7 @@
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
- __ PushFixedFrame(r1);
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r1);
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
@@ -1430,24 +1427,6 @@
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1494,6 +1473,27 @@
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : first argument (left-hand side)
+ // -- sp[4] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ ldr(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ ldr(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ Ret(2);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1933,19 +1933,21 @@
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ mov(scratch1, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ ldrb(scratch1, MemOperand(scratch1));
__ cmp(scratch1, Operand(0));
- __ b(ne, &done);
+ __ b(eq, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ ldr(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ b(ne, &no_interpreter_frame);
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1953,73 +1955,37 @@
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_arguments_adaptor);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ldr(scratch1,
+ __ ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ b(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
- __ ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ ldr(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch1,
+ __ ldr(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
- __ add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ add(src_reg, sp, Operand(args_reg, LSL, kPointerSizeLog2));
- // Count receiver argument as well (not included in args_reg).
- __ add(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ cmp(src_reg, dst_reg);
- __ Check(lo, kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
- __ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(sp, src_reg);
- __ b(ne, &loop);
-
- // Leave current frame.
- __ mov(sp, dst_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2473,27 +2439,6 @@
{ // Too few parameters: Actual < expected
__ bind(&too_few);
-
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
- kSmiTagSize)));
- __ b(eq, &no_strong_error);
-
- // What we really care about is the required number of arguments.
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
- __ cmp(r0, Operand::SmiUntag(r4));
- __ b(ge, &no_strong_error);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 82fb51d..31e3e95 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_ARM
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -77,6 +78,10 @@
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -477,7 +482,9 @@
}
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
@@ -486,7 +493,7 @@
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
@@ -524,6 +531,16 @@
__ bind(&undetectable);
__ tst(r5, Operand(1 << Map::kIsUndetectable));
__ b(eq, &return_unequal);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CompareInstanceType(r2, r2, ODDBALL_TYPE);
+ __ b(eq, &return_equal);
+ __ CompareInstanceType(r3, r3, ODDBALL_TYPE);
+ __ b(ne, &return_unequal);
+
+ __ bind(&return_equal);
__ mov(r0, Operand(EQUAL));
__ Ret();
}
@@ -1049,9 +1066,9 @@
if (result_size() > 2) {
DCHECK_EQ(3, result_size());
// Read result values stored on stack.
- __ ldr(r2, MemOperand(r0, 2 * kPointerSize));
- __ ldr(r1, MemOperand(r0, 1 * kPointerSize));
- __ ldr(r0, MemOperand(r0, 0 * kPointerSize));
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
}
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
@@ -1358,8 +1375,12 @@
__ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
__ b(ne, &slow_case);
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &slow_case);
+
+ // Ensure that {function} has an instance prototype.
__ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ b(ne, &slow_case);
@@ -1427,7 +1448,8 @@
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -1480,29 +1502,6 @@
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ NonNegativeSmiTst(key);
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2633,29 +2632,28 @@
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r0.
- Label not_smi;
- __ JumpIfNotSmi(r0, &not_smi);
- __ Ret();
- __ bind(&not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ Ret(eq);
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
// r0: receiver
// r1: receiver instance type
__ Ret(eq);
- Label not_string, slow_string;
- __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &not_string);
- // Check if string has a cached array index.
- __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
- __ b(ne, &slow_string);
- __ IndexFromHash(r2, r0);
- __ Ret();
- __ bind(&slow_string);
- __ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
- __ bind(&not_string);
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in r0.
+ __ AssertNotNumber(r0);
+
+ __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+ // r0: receiver
+ // r1: receiver instance type
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub, lo);
Label not_oddball;
__ cmp(r1, Operand(ODDBALL_TYPE));
@@ -2664,26 +2662,27 @@
__ Ret();
__ bind(&not_oddball);
- __ push(r0); // Push argument.
+ __ Push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in r0.
+ __ AssertString(r0);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes one argument in r0.
- Label not_smi;
- __ JumpIfNotSmi(r0, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, r0);
- __ mov(r0, Operand(0), LeaveCC, lt);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
+ __ b(ne, &runtime);
+ __ IndexFromHash(r2, r0);
__ Ret();
- __ bind(&not_smi);
- __ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ bind(&runtime);
+ __ Push(r0); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r0.
Label is_number;
@@ -2839,42 +2838,6 @@
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : left
- // -- r0 : right
- // -- lr : return address
- // -----------------------------------
- __ AssertString(r1);
- __ AssertString(r0);
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r1,
- r2);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
-
- // Compare flat one-byte strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
- r3);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ Push(r1, r0);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left
@@ -3168,10 +3131,17 @@
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left, right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(r1, Heap::kTrueValueRootIndex);
+ __ sub(r0, r0, r1);
+ __ Ret();
} else {
+ __ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3710,7 +3680,7 @@
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ ldr(r1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ add(r1, r1, Operand(1));
@@ -4703,7 +4673,7 @@
__ bind(&loop);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(ne, &loop);
}
@@ -4712,7 +4682,7 @@
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_rest_parameters);
@@ -4851,7 +4821,7 @@
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+ __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
@@ -5050,7 +5020,7 @@
__ bind(&loop);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(ne, &loop);
}
@@ -5058,7 +5028,7 @@
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
@@ -5424,16 +5394,12 @@
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : callee
// -- r4 : call_data
// -- r2 : holder
// -- r1 : api_function_address
- // -- r3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5459,11 +5425,9 @@
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || r3.is(argc.reg()));
-
// context save
__ push(context);
- if (!is_lazy) {
+ if (!is_lazy()) {
// load context from callee
__ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
@@ -5475,7 +5439,7 @@
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
@@ -5504,29 +5468,15 @@
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ str(scratch, MemOperand(r0, 0 * kPointerSize));
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::values_
- __ add(ip, scratch,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ mov(ip, Operand(argc.immediate()));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
- } else {
- // FunctionCallbackInfo::values_
- __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
- __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_
- __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
- __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
- }
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ mov(ip, Operand(argc()));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ mov(ip, Operand::Zero());
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5536,7 +5486,7 @@
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5545,33 +5495,15 @@
int stack_space = 0;
MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = NULL;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_operand, return_value_operand,
&context_restore_operand);
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 2dee363..7e1a550 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -898,10 +898,8 @@
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->PushFixedFrame(r1);
+ patcher->masm()->PushStandardFrame(r1);
patcher->masm()->nop(ip.code());
- patcher->masm()->add(
- fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index b9d4788..a162051 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -654,7 +654,7 @@
inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkValue() == 1; }
- // Decoding the double immediate in the vmov instruction.
+ // Decode the double immediate from a vmov instruction.
double DoubleImmedVmov() const;
// Instructions are read of out a code stream. The only way to get a
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3e9fac7..2785b75 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -103,12 +103,6 @@
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on ARM in the input frame.
- return false;
-}
-
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -162,7 +156,12 @@
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(6, r5);
+ __ mov(r0, Operand(0));
+ Label context_check;
+ __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
@@ -235,6 +234,8 @@
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
+ __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 9258703..287152a 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -604,6 +604,26 @@
Print("s");
}
return 4;
+ } else if (format[1] == 'p') {
+ if (format[8] == '_') { // 'spec_reg_fields
+ DCHECK(STRING_STARTS_WITH(format, "spec_reg_fields"));
+ Print("_");
+ int mask = instr->Bits(19, 16);
+ if (mask == 0) Print("(none)");
+ if ((mask & 0x8) != 0) Print("f");
+ if ((mask & 0x4) != 0) Print("s");
+ if ((mask & 0x2) != 0) Print("x");
+ if ((mask & 0x1) != 0) Print("c");
+ return 15;
+ } else { // 'spec_reg
+ DCHECK(STRING_STARTS_WITH(format, "spec_reg"));
+ if (instr->Bit(22) == 0) {
+ Print("CPSR");
+ } else {
+ Print("SPSR");
+ }
+ return 8;
+ }
}
// 's: S field of data processing instructions
if (instr->HasS()) {
@@ -822,7 +842,13 @@
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
+ if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
+ (instr->Bits(15, 4) == 0xf00)) {
+ Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
+ } else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
+ (instr->Bits(11, 0) == 0)) {
+ Format(instr, "mrs'cond 'rd, 'spec_reg");
+ } else if (instr->Bits(22, 21) == 1) {
switch (instr->BitField(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
@@ -1404,7 +1430,7 @@
if (instr->SzValue() == 0x1) {
Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
- Unknown(instr); // Not used by V8.
+ Format(instr, "vmov'cond.f32 'Sd, 'd");
}
} else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
// vrintz - round towards zero (truncate)
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 1ea7b1a..3792775 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -93,16 +93,11 @@
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize =
- FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
-
- static const int kConstantPoolOffset =
- FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
@@ -120,7 +115,7 @@
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index 1f55c0b..b6cac76 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -111,35 +111,8 @@
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
// static
-const Register ToLengthDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -267,6 +240,13 @@
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -311,6 +291,12 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -319,20 +305,6 @@
}
-void CompareNilDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -408,25 +380,7 @@
&default_descriptor);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- r0, // callee
- r4, // call_data
- r2, // holder
- r1, // api_function_address
- r3, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 80aef0c..6af3d6c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -738,12 +738,12 @@
str(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
- tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
if (and_then == kFallThroughAtEnd) {
- b(eq, &done);
+ b(ne, &done);
} else {
DCHECK(and_then == kReturnAtEnd);
- Ret(eq);
+ Ret(ne);
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -755,20 +755,65 @@
}
}
-
-void MacroAssembler::PushFixedFrame(Register marker_reg) {
- DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
- stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
- (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
- fp.bit() | lr.bit());
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ if (marker_reg.code() > pp.code()) {
+ stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(kPointerSize));
+ Push(marker_reg);
+ } else {
+ stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(2 * kPointerSize));
+ }
+ } else {
+ if (marker_reg.code() > fp.code()) {
+ stm(db_w, sp, fp.bit() | lr.bit());
+ mov(fp, Operand(sp));
+ Push(marker_reg);
+ } else {
+ stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(kPointerSize));
+ }
+ }
+ } else {
+ stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
+ add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
+ }
}
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ if (marker_reg.code() > pp.code()) {
+ pop(marker_reg);
+ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
+ }
+ } else {
+ if (marker_reg.code() > fp.code()) {
+ pop(marker_reg);
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ } else {
+ ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+ }
+ }
+ } else {
+ ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
+ }
+}
-void MacroAssembler::PopFixedFrame(Register marker_reg) {
- DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
- ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+ DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
+ stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
+ int offset = -StandardFrameConstants::kContextOffset;
+ offset += function_reg.is_valid() ? kPointerSize : 0;
+ add(fp, sp, Operand(offset));
}
@@ -1056,7 +1101,144 @@
vmov(dst, VmovIndexLo, src);
}
}
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_high, src_low));
+ DCHECK(!AreAliased(dst_high, shift));
+ Label less_than_32;
+ Label done;
+ rsb(scratch, shift, Operand(32), SetCC);
+ b(gt, &less_than_32);
+ // If shift >= 32
+ and_(scratch, shift, Operand(0x1f));
+ lsl(dst_high, src_low, Operand(scratch));
+ mov(dst_low, Operand(0));
+ jmp(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ lsl(dst_high, src_high, Operand(shift));
+ orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
+ lsl(dst_low, src_low, Operand(shift));
+ bind(&done);
+}
+
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_high, src_low));
+ Label less_than_32;
+ Label done;
+ if (shift == 0) {
+ Move(dst_high, src_high);
+ Move(dst_low, src_low);
+ } else if (shift == 32) {
+ Move(dst_high, src_low);
+ Move(dst_low, Operand(0));
+ } else if (shift >= 32) {
+ shift &= 0x1f;
+ lsl(dst_high, src_low, Operand(shift));
+ mov(dst_low, Operand(0));
+ } else {
+ lsl(dst_high, src_high, Operand(shift));
+ orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
+ lsl(dst_low, src_low, Operand(shift));
+ }
+}
+
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_low, shift));
+
+ Label less_than_32;
+ Label done;
+ rsb(scratch, shift, Operand(32), SetCC);
+ b(gt, &less_than_32);
+ // If shift >= 32
+ and_(scratch, shift, Operand(0x1f));
+ lsr(dst_low, src_high, Operand(scratch));
+ mov(dst_high, Operand(0));
+ jmp(&done);
+ bind(&less_than_32);
+ // If shift < 32
+
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
+ lsr(dst_high, src_high, Operand(shift));
+ bind(&done);
+}
+
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ Label less_than_32;
+ Label done;
+ if (shift == 32) {
+ mov(dst_low, src_high);
+ mov(dst_high, Operand(0));
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ lsr(dst_low, src_high, Operand(shift));
+ mov(dst_high, Operand(0));
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
+ lsr(dst_high, src_high, Operand(shift));
+ }
+}
+
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_low, shift));
+
+ Label less_than_32;
+ Label done;
+ rsb(scratch, shift, Operand(32), SetCC);
+ b(gt, &less_than_32);
+ // If shift >= 32
+ and_(scratch, shift, Operand(0x1f));
+ asr(dst_low, src_high, Operand(scratch));
+ asr(dst_high, src_high, Operand(31));
+ jmp(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
+ asr(dst_high, src_high, Operand(shift));
+ bind(&done);
+}
+
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ Label less_than_32;
+ Label done;
+ if (shift == 32) {
+ mov(dst_low, src_high);
+ asr(dst_high, src_high, Operand(31));
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ asr(dst_low, src_high, Operand(shift));
+ asr(dst_high, src_high, Operand(31));
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
+ asr(dst_high, src_high, Operand(shift));
+ }
+}
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@@ -1074,19 +1256,15 @@
LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}
-
-void MacroAssembler::StubPrologue() {
- PushFixedFrame();
- Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+ mov(ip, Operand(Smi::FromInt(type)));
+ PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
-
void MacroAssembler::Prologue(bool code_pre_aging) {
{ PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
@@ -1099,10 +1277,8 @@
ldr(pc, MemOperand(pc, -4));
emit_code_stub_address(stub);
} else {
- PushFixedFrame(r1);
+ PushStandardFrame(r1);
nop(ip.code());
- // Adjust FP to point to saved FP.
- add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
if (FLAG_enable_embedded_constant_pool) {
@@ -1123,17 +1299,15 @@
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
- PushFixedFrame();
+ mov(ip, Operand(Smi::FromInt(type)));
+ PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
- mov(ip, Operand(Smi::FromInt(type)));
- push(ip);
- mov(ip, Operand(CodeObject()));
- push(ip);
- // Adjust FP to point to saved FP.
- add(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ if (type == StackFrame::INTERNAL) {
+ mov(ip, Operand(CodeObject()));
+ push(ip);
+ }
}
@@ -1164,10 +1338,10 @@
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- Push(lr, fp);
- mov(fp, Operand(sp)); // Set up new frame pointer.
+ mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
+ PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+ sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -1249,7 +1423,7 @@
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int offset = ExitFrameConstants::kFrameSize;
+ const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
RestoreFPRegs(r3, ip);
@@ -1300,6 +1474,64 @@
MovFromFloatResult(dst);
}
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+ add(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ add(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ add(src_reg, sp,
+ Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ cmp(src_reg, dst_reg);
+ Check(lo, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ b(&entry);
+ bind(&loop);
+ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ bind(&entry);
+ cmp(sp, src_reg);
+ b(ne, &loop);
+
+ // Leave current frame.
+ mov(sp, dst_reg);
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -1578,8 +1810,19 @@
DCHECK(!holder_reg.is(ip));
DCHECK(!scratch.is(ip));
- // Load current lexical context from the stack frame.
- ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ DCHECK(!ip.is(scratch));
+ mov(ip, fp);
+ bind(&load_context);
+ ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch, &has_context);
+ ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+ b(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
cmp(scratch, Operand::Zero());
@@ -2803,6 +3046,17 @@
b(eq, on_either_smi);
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsANumber);
+ push(object);
+ CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+ pop(object);
+ Check(ne, kOperandIsANumber);
+ }
+}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@@ -3510,28 +3764,45 @@
b(ne, &next);
}
-
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- add(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- cmp(scratch_reg, Operand(new_space_start));
- b(lt, no_memento_found);
- mov(ip, Operand(new_space_allocation_top));
- ldr(ip, MemOperand(ip));
- cmp(scratch_reg, ip);
- b(gt, no_memento_found);
- ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
- cmp(scratch_reg,
- Operand(isolate()->factory()->allocation_memento_map()));
-}
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
+ b(eq, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ eor(scratch_reg, scratch_reg, Operand(receiver_reg));
+ tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
+ b(ne, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ cmp(scratch_reg, Operand(new_space_allocation_top));
+ b(gt, no_memento_found);
+ // Memento map check.
+ bind(&map_check);
+ ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
+}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 468f4b5..f326304 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -457,10 +457,14 @@
}
// Push a fixed frame, consisting of lr, fp, constant pool (if
- // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
- // marker_reg is a valid register.
- void PushFixedFrame(Register marker_reg = no_reg);
- void PopFixedFrame(Register marker_reg = no_reg);
+ // FLAG_enable_embedded_constant_pool)
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Push a standard frame, consisting of lr, fp, constant pool (if
+ // FLAG_enable_embedded_constant_pool), context and JS function
+ void PushStandardFrame(Register function_reg);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@@ -545,6 +549,19 @@
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
+ void LslPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void LslPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void LsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void LsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void AsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void AsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
@@ -580,7 +597,7 @@
Label* not_int32);
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter exit frame.
@@ -637,6 +654,15 @@
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -1280,6 +1306,9 @@
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a number, enabled via --debug-code.
+ void AssertNotNumber(Register object);
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 4630b94..6c22a0a 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1041,6 +1041,32 @@
return value;
}
+void Simulator::SetSpecialRegister(SRegisterFieldMask reg_and_mask,
+ uint32_t value) {
+ // Only CPSR_f is implemented. Of that, only N, Z, C and V are implemented.
+ if ((reg_and_mask == CPSR_f) && ((value & ~kSpecialCondition) == 0)) {
+ n_flag_ = ((value & (1 << 31)) != 0);
+ z_flag_ = ((value & (1 << 30)) != 0);
+ c_flag_ = ((value & (1 << 29)) != 0);
+ v_flag_ = ((value & (1 << 28)) != 0);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+uint32_t Simulator::GetFromSpecialRegister(SRegister reg) {
+ uint32_t result = 0;
+ // Only CPSR_f is implemented.
+ if (reg == CPSR) {
+ if (n_flag_) result |= (1 << 31);
+ if (z_flag_) result |= (1 << 30);
+ if (c_flag_) result |= (1 << 29);
+ if (v_flag_) result |= (1 << 28);
+ } else {
+ UNIMPLEMENTED();
+ }
+ return result;
+}
// Runtime FP routines take:
// - two double arguments
@@ -1307,11 +1333,12 @@
// Calculate C flag value for subtractions.
-bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+bool Simulator::BorrowFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- return (uright > uleft);
+ return (uright > uleft) ||
+ (!carry && (((uright + 1) > uleft) || (uright > (uleft - 1))));
}
@@ -2312,7 +2339,22 @@
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
+ if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
+ (instr->Bits(15, 4) == 0xf00)) {
+ // MSR
+ int rm = instr->RmValue();
+ DCHECK_NE(pc, rm); // UNPREDICTABLE
+ SRegisterFieldMask sreg_and_mask =
+ instr->BitField(22, 22) | instr->BitField(19, 16);
+ SetSpecialRegister(sreg_and_mask, get_register(rm));
+ } else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
+ (instr->Bits(11, 0) == 0)) {
+ // MRS
+ int rd = instr->RdValue();
+ DCHECK_NE(pc, rd); // UNPREDICTABLE
+ SRegister sreg = static_cast<SRegister>(instr->BitField(22, 22));
+ set_register(rd, GetFromSpecialRegister(sreg));
+ } else if (instr->Bits(22, 21) == 1) {
int rm = instr->RmValue();
switch (instr->BitField(7, 4)) {
case BX:
@@ -2452,8 +2494,15 @@
}
case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+ // Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+ alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(!BorrowFrom(rn_val, shifter_operand, GetCarry()));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+ }
break;
}
@@ -3215,7 +3264,7 @@
if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
- UNREACHABLE(); // Not used by v8.
+ set_s_register_from_float(d, instr->DoubleImmedVmov());
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 6567607..b3c8eb4 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -262,7 +262,7 @@
void SetCFlag(bool val);
void SetVFlag(bool val);
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
- bool BorrowFrom(int32_t left, int32_t right);
+ bool BorrowFrom(int32_t left, int32_t right, int32_t carry = 1);
bool OverflowFrom(int32_t alu_out,
int32_t left,
int32_t right,
@@ -363,6 +363,9 @@
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
+ void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value);
+ uint32_t GetFromSpecialRegister(SRegister reg);
+
void CallInternal(byte* entry);
// Architecture state.