Update V8 to version 4.1.0.21
This is a cherry-pick of all commits up to and including the
4.1.0.21 cherry-pick in Chromium.
Original commit message:
Version 4.1.0.21 (cherry-pick)
Merged 206e9136bde0f2b5ae8cb77afbb1e7833e5bd412
Unlink pages from the space page list after evacuation.
BUG=430201
LOG=N
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/953813002
Cr-Commit-Position: refs/branch-heads/4.1@{#22}
Cr-Branched-From: 2e08d2a7aa9d65d269d8c57aba82eb38a8cb0a18-refs/heads/candidates@{#25353}
---
FPIIM-449
Change-Id: I8c23c7bbb70772b4858fe8a47b64fa97ee0d1f8c
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index 5e1bed1..4547efe 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -503,7 +503,7 @@
DCHECK(addrmode == Offset);
regoffset_ = offset.reg();
- shift_= offset.shift();
+ shift_ = offset.shift();
shift_amount_ = offset.shift_amount();
extend_ = NO_EXTEND;
@@ -520,7 +520,7 @@
extend_ = offset.extend();
shift_amount_ = offset.shift_amount();
- shift_= NO_SHIFT;
+ shift_ = NO_SHIFT;
offset_ = 0;
// These assertions match those in the extended-register constructor.
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index c1213e9..770d425 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -44,22 +44,27 @@
// CpuFeatures implementation.
void CpuFeatures::ProbeImpl(bool cross_compile) {
- if (cross_compile) {
- // Always align csp in cross compiled code - this is safe and ensures that
- // csp will always be aligned if it is enabled by probing at runtime.
- if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
- } else {
- base::CPU cpu;
- if (FLAG_enable_always_align_csp &&
- (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
- supported_ |= 1u << ALWAYS_ALIGN_CSP;
- }
+ // AArch64 has no configuration options; no further probing is required.
+ supported_ = 0;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+ // Probe for runtime features
+ base::CPU cpu;
+ if (cpu.implementer() == base::CPU::NVIDIA &&
+ cpu.variant() == base::CPU::NVIDIA_DENVER) {
+ supported_ |= 1u << COHERENT_CACHE;
}
}
void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
+
+
+void CpuFeatures::PrintFeatures() {
+ printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
+}
// -----------------------------------------------------------------------------
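Aside for readers skimming the hunk above: supported_ is a plain bitmask indexed by feature enum values. A minimal standalone sketch of the pattern (names simplified; only COHERENT_CACHE appears in the real change):

    #include <cstdio>
    enum CpuFeature { COHERENT_CACHE = 0 };  // bit index, as in the real enum
    static unsigned supported_ = 0;
    static bool IsSupported(CpuFeature f) { return (supported_ & (1u << f)) != 0; }
    int main() {
      supported_ |= 1u << COHERENT_CACHE;  // what ProbeImpl does on Denver cores
      std::printf("COHERENT_CACHE=%d\n", IsSupported(COHERENT_CACHE) ? 1 : 0);
    }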
@@ -612,9 +617,12 @@
void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
if (label->is_linked()) {
+ static const int kMaxLinksToCheck = 64; // Avoid O(n^2) behaviour.
+ int links_checked = 0;
int linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
+ if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset;
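The cap added above trades exhaustiveness for speed: CheckLabelLinkChain runs once per link insertion, so verifying the entire chain each time is quadratic overall. A sketch of the bounded-walk idea over a generic singly linked chain (Node is a stand-in; the real code follows encoded pc offsets between instructions):

    struct Node { Node* prev; };
    void CheckChain(const Node* head) {
      static const int kMaxLinksToCheck = 64;  // bound the per-call work
      int links_checked = 0;
      for (const Node* n = head; n != nullptr; n = n->prev) {
        if (++links_checked > kMaxLinksToCheck) break;  // give up early
        // ... per-link consistency checks would go here ...
      }
    }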
@@ -1936,6 +1944,12 @@
}
+void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTP);
+}
+
+
void Assembler::frintz(const FPRegister& fd,
const FPRegister& fn) {
DCHECK(fd.SizeInBits() == fn.SizeInBits());
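frintp rounds toward plus infinity, i.e. the ceiling. In portable C++ terms (illustration only, not V8 code):

    #include <cmath>
    #include <cstdio>
    int main() {
      // frintp d0, d1 computes the ceiling of d1:
      std::printf("%.1f %.1f\n", std::ceil(1.2), std::ceil(-1.2));  // 2.0 -1.0
    }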
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 82b4500..53496f3 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -276,6 +276,11 @@
(kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+ // TODO(turbofan): Proper float32 support.
+ static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
// Return true if the register is one that crankshaft can allocate.
bool IsAllocatable() const {
return (Bit() & kAllocatableFPRegisters) != 0;
@@ -1658,6 +1663,9 @@
// FP round to integer (nearest with ties to even).
void frintn(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer (towards plus infinity).
+ void frintp(const FPRegister& fd, const FPRegister& fn);
+
// FP round to integer (towards zero.)
void frintz(const FPRegister& fd, const FPRegister& fn);
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index 0013e24..9f140c2 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -10,7 +10,7 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -156,7 +156,7 @@
__ Cbz(argc, &no_arguments);
// First args = sp[(argc - 1) * 8].
__ Sub(argc, argc, 1);
- __ Claim(argc, kXRegSize);
+ __ Drop(argc, kXRegSize);
// jssp now points to args[0]; load and drop args[0] + receiver.
Register arg = argc;
__ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));
@@ -367,13 +367,13 @@
FieldMemOperand(init_map, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ Ldr(x4, bit_field3);
- __ DecodeField<Map::ConstructionCount>(constructon_count, x4);
- __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
- __ B(eq, &allocate);
+ __ DecodeField<Map::Counter>(constructon_count, x4);
+ __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
+ __ B(lt, &allocate);
// Decrease generous allocation count.
- __ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift));
+ __ Subs(x4, x4, Operand(1 << Map::Counter::kShift));
__ Str(x4, bit_field3);
- __ Cmp(constructon_count, Operand(JSFunction::kFinishSlackTracking));
+ __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
__ B(ne, &allocate);
// Push the constructor and map to the stack, and the constructor again
@@ -381,7 +381,7 @@
__ Push(constructor, init_map, constructor);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(init_map, constructor);
- __ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking));
+ __ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
__ Bind(&allocate);
}
@@ -434,8 +434,8 @@
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
- __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
- __ B(eq, &no_inobject_slack_tracking);
+ __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
+ __ B(lt, &no_inobject_slack_tracking);
constructon_count = NoReg;
// Fill the pre-allocated fields with undef.
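The hunks above switch slack tracking from an equality test against a JSFunction sentinel to a range test on Map::Counter: tracking is active while the counter is at or above Map::kSlackTrackingCounterEnd, which is why the branches change from eq to lt. A sketch of the predicate, assuming only what the comparisons above imply:

    // Assumed scheme: the counter decrements toward kSlackTrackingCounterEnd;
    // values below the end mean tracking has finished.
    bool SlackTrackingInProgress(int counter, int kSlackTrackingCounterEnd) {
      return counter >= kSlackTrackingCounterEnd;  // B(lt, ...) takes the inverse
    }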
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 4978e5e..e773b53 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -14,7 +14,7 @@
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -1412,6 +1412,11 @@
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@@ -1422,6 +1427,40 @@
}
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label miss;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
+ Register result = x0;
+ Register scratch = x10;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX,
+ RECEIVER_IS_STRING);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ Bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void InstanceofStub::Generate(MacroAssembler* masm) {
// Stack on entry:
// jssp[0]: function.
@@ -1569,7 +1608,7 @@
__ Mov(result, res_false);
// Null is not instance of anything.
- __ Cmp(object_type, Operand(isolate()->factory()->null_value()));
+ __ Cmp(object, Operand(isolate()->factory()->null_value()));
__ B(ne, &object_not_null);
__ Ret();
@@ -2683,13 +2722,13 @@
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
+ __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ Bind(&megamorphic);
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
+ __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
__ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ B(&done);
@@ -2988,6 +3027,10 @@
// x1 - function
// x3 - slot id (Smi)
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
@@ -3036,24 +3079,72 @@
}
__ bind(&extra_checks_or_miss);
- Label miss;
+ Label uninitialized, miss;
- __ JumpIfRoot(x4, Heap::kMegamorphicSymbolRootIndex, &slow_start);
- __ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss);
+ __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
- if (!FLAG_trace_ic) {
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(x4);
- __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex);
- __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
- __ B(&slow_start);
+ // The following cases attempt to handle MISS cases without going to the
+ // runtime.
+ if (FLAG_trace_ic) {
+ __ jmp(&miss);
}
- // We are here because tracing is on or we are going monomorphic.
+ __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
+
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(x4);
+ __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
+ __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ // We have to update statistics for runtime profiling.
+ __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Subs(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
+ __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
+ __ B(&slow_start);
+
+ __ bind(&uninitialized);
+
+ // We are going monomorphic, provided we actually have a JSFunction.
+ __ JumpIfSmi(function, &miss);
+
+ // Goto miss case if we do not have a function.
+ __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
+
+ // Make sure the function is not the Array() function, which requires special
+ // behavior on MISS.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+ __ Cmp(function, x5);
+ __ B(eq, &miss);
+
+ // Update stats.
+ __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+
+ // Store the function.
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Str(function, FieldMemOperand(x4, FixedArray::kHeaderSize));
+
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(function, MemOperand(x4, 0));
+
+ // Update the write barrier.
+ __ Mov(x5, function);
+ __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ B(&have_js_function);
+
+ // We are here because tracing is on or we encountered a MISS case we can't
+ // handle here.
__ bind(&miss);
GenerateMiss(masm);
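The rewritten block implements a small state machine over the feedback slot, now handling the uninitialized-to-monomorphic transition inline and adjusting the with_types/generic counters as states change. A sketch of the transitions, with illustrative enum names (the real code stores the target function itself or symbol sentinels in the vector):

    enum class FeedbackState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };
    FeedbackState Transition(FeedbackState s, bool same_target,
                             bool is_array_function) {
      switch (s) {
        case FeedbackState::UNINITIALIZED:
          // Array() needs special MISS handling, so it is not recorded here.
          return is_array_function ? s : FeedbackState::MONOMORPHIC;
        case FeedbackState::MONOMORPHIC:
          // A different target makes the site generic (with_types--, generic++).
          return same_target ? s : FeedbackState::MEGAMORPHIC;
        case FeedbackState::MEGAMORPHIC:
          return s;  // terminal state
      }
      return s;  // not reached
    }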
@@ -3097,14 +3188,16 @@
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
+ if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+ __ JumpIfSmi(object_, receiver_not_string_);
- // Fetch the instance type of the receiver into result register.
- __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+ // If the receiver is not a string trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+ }
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
@@ -3782,9 +3875,9 @@
// x12: input_type
// x15: from (untagged)
__ SmiTag(from);
- StringCharAtGenerator generator(
- input_string, from, result_length, x0,
- &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ StringCharAtGenerator generator(input_string, from, result_length, x0,
+ &runtime, &runtime, &runtime,
+ STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
@@ -3792,6 +3885,49 @@
}
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in x0.
+ Label not_smi;
+ __ JumpIfNotSmi(x0, &not_smi);
+ __ Ret();
+ __ Bind(&not_smi);
+
+ Label not_heap_number;
+ __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
+ // x0: object
+ // x1: instance type
+ __ Cmp(x1, HEAP_NUMBER_TYPE);
+ __ B(ne, &not_heap_number);
+ __ Ret();
+ __ Bind(&not_heap_number);
+
+ Label not_string, slow_string;
+ __ Cmp(x1, FIRST_NONSTRING_TYPE);
+ __ B(hs, &not_string);
+ // Check if string has a cached array index.
+ __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
+ __ B(ne, &slow_string);
+ __ IndexFromHash(x2, x0);
+ __ Ret();
+ __ Bind(&slow_string);
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ Bind(&not_string);
+
+ Label not_oddball;
+ __ Cmp(x1, ODDBALL_TYPE);
+ __ B(ne, &not_oddball);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
+ __ Ret();
+ __ Bind(&not_oddball);
+
+ __ Push(x0); // Push argument.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
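The new ToNumberStub above is a type dispatch. Its order of checks, written out as ordinary C++ over a hypothetical tagged Value type (every name here is an illustrative stand-in, not V8 API):

    #include <cstdlib>
    #include <string>
    struct Value {  // hypothetical stand-in for a tagged V8 value
      enum Kind { kSmi, kHeapNumber, kString, kOddball } kind;
      double number;                  // payload for kSmi / kHeapNumber / kOddball
      std::string str;                // payload for kString
      bool has_cached_index = false;  // String::kContainsCachedArrayIndexMask
      double cached_index = 0;
    };
    double ToNumber(const Value& v) {
      switch (v.kind) {
        case Value::kSmi:                            // fastest path: small integer
        case Value::kHeapNumber: return v.number;    // already numeric
        case Value::kString:
          if (v.has_cached_index) return v.cached_index;   // IndexFromHash path
          return std::strtod(v.str.c_str(), nullptr);      // ~ kStringToNumber
        case Value::kOddball:    return v.number;    // true->1, false/null->0, ...
      }
      return 0;  // not reached; Builtins::TO_NUMBER handles everything else
    }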
@@ -4236,18 +4372,10 @@
}
-static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
- // The entry hook is a "BumpSystemStackPointer" instruction (sub),
- // followed by a "Push lr" instruction, followed by a call.
- unsigned int size =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
- // "BumpSystemStackPointer".
- size += kInstructionSize;
- }
- return size;
-}
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -4260,7 +4388,7 @@
__ Push(lr);
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- GetProfileEntryHookCallSize(masm));
+ kProfileEntryHookCallSize);
__ Pop(lr);
}
@@ -4278,7 +4406,7 @@
const int kNumSavedRegs = kCallerSaved.Count();
// Compute the function's address as the first argument.
- __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
+ __ Sub(x0, lr, kProfileEntryHookCallSize);
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
diff --git a/src/arm64/code-stubs-arm64.h b/src/arm64/code-stubs-arm64.h
index 03dab5b..c9ee2c9 100644
--- a/src/arm64/code-stubs-arm64.h
+++ b/src/arm64/code-stubs-arm64.h
@@ -97,7 +97,7 @@
INCREMENTAL_COMPACTION
};
- virtual bool SometimesSetsUpAFrame() { return false; }
+ bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
@@ -275,9 +275,9 @@
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+ inline Major MajorKey() const FINAL { return RecordWrite; }
- virtual void Generate(MacroAssembler* masm) OVERRIDE;
+ void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -285,7 +285,7 @@
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) {
+ void Activate(Code* code) OVERRIDE {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
@@ -328,7 +328,7 @@
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() { return true; }
+ bool NeedsImmovableCode() OVERRIDE { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@@ -360,7 +360,7 @@
Register scratch1,
Register scratch2);
- virtual bool SometimesSetsUpAFrame() { return false; }
+ bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
static const int kInlinedProbes = 4;
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index 91eaba7..cda6e5b 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -290,15 +290,28 @@
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
+ Register the_hole = x14;
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
- __ Add(array, array, kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
- Register the_hole = x14;
+ // Allocating heap numbers in the loop below can fail and cause a jump to
+ // gc_required. We can't leave a partly initialized FixedArray behind,
+ // so pessimistically fill it with holes now.
+ Label initialization_loop, initialization_loop_entry;
+ __ B(&initialization_loop_entry);
+ __ bind(&initialization_loop);
+ __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
+ __ bind(&initialization_loop_entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &initialization_loop);
+
+ __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(array, array, kHeapObjectTag);
+
Register heap_num_map = x15;
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;
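The restructuring above applies a common GC-safety pattern: before running a loop whose allocations may bail out to gc_required, pre-fill the destination with the hole sentinel so the collector never sees uninitialized slots. The core idea, as a sketch:

    #include <cstdint>
    // Pre-fill [dst, dst_end) with a safe sentinel before a fallible fill loop.
    void FillWithHoles(uintptr_t* dst, uintptr_t* dst_end, uintptr_t the_hole) {
      while (dst < dst_end) *dst++ = the_hole;  // mirrors the Str/PostIndex loop
    }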
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index 39beb6d..11ba7c9 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -43,6 +43,8 @@
void CpuFeatures::FlushICache(void* address, size_t length) {
if (length == 0) return;
+ if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
+
#ifdef USE_SIMULATOR
// TODO(all): consider doing some cache simulation to ensure every address
// run has been synced.
diff --git a/src/arm64/debug-arm64.cc b/src/arm64/debug-arm64.cc
index f57d5b5..dae5a28 100644
--- a/src/arm64/debug-arm64.cc
+++ b/src/arm64/debug-arm64.cc
@@ -238,7 +238,11 @@
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
+ RegList regs = receiver.Bit() | name.Bit();
+ if (FLAG_vector_ics) {
+ regs |= VectorLoadICTrampolineDescriptor::SlotRegister().Bit();
+ }
+ Generate_DebugBreakCallHelper(masm, regs, 0, x10);
}
diff --git a/src/arm64/delayed-masm-arm64.cc b/src/arm64/delayed-masm-arm64.cc
index c3bda91..b51e77e 100644
--- a/src/arm64/delayed-masm-arm64.cc
+++ b/src/arm64/delayed-masm-arm64.cc
@@ -16,8 +16,8 @@
void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
- DCHECK(src->IsStackSlot());
- DCHECK(dst->IsStackSlot());
+ DCHECK((src->IsStackSlot() && dst->IsStackSlot()) ||
+ (src->IsDoubleStackSlot() && dst->IsDoubleStackSlot()));
MemOperand src_operand = cgen_->ToMemOperand(src);
MemOperand dst_operand = cgen_->ToMemOperand(dst);
if (pending_ == kStackSlotMove) {
diff --git a/src/arm64/deoptimizer-arm64.cc b/src/arm64/deoptimizer-arm64.cc
index d67dc8f..b83bbbe 100644
--- a/src/arm64/deoptimizer-arm64.cc
+++ b/src/arm64/deoptimizer-arm64.cc
@@ -21,6 +21,11 @@
}
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ // Empty because there is no need for relocation information for the code
+ // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as it will become invalid by the
diff --git a/src/arm64/disasm-arm64.cc b/src/arm64/disasm-arm64.cc
index ac7cb37..36bad37 100644
--- a/src/arm64/disasm-arm64.cc
+++ b/src/arm64/disasm-arm64.cc
@@ -1695,7 +1695,7 @@
DCHECK(format[0] == 'M');
USE(format);
- static const char* options[4][4] = {
+ static const char* const options[4][4] = {
{ "sy (0b0000)", "oshld", "oshst", "osh" },
{ "sy (0b0100)", "nshld", "nshst", "nsh" },
{ "sy (0b1000)", "ishld", "ishst", "ish" },
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index 25a9efd..0d3d34b 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -196,10 +196,10 @@
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo()));
__ Push(x1, x10);
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
@@ -299,24 +299,26 @@
}
VisitDeclarations(scope()->declarations());
}
- }
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ Bind(&ok);
- }
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
- DCHECK(loop_depth() == 0);
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(function()->body());
+ DCHECK(loop_depth() == 0);
+ }
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -932,7 +934,7 @@
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
- __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ LoadContext(x1, scope_->ContextChainLength(scope_->ScriptScope()));
__ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
__ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
@@ -1097,7 +1099,7 @@
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
// TODO(all): This visitor probably needs better comments and a revisit.
SetStatementPosition(stmt);
@@ -1107,6 +1109,7 @@
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
+ SetExpressionPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
Register null_value = x15;
@@ -1124,6 +1127,7 @@
__ Push(x0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(x0);
// Check for proxies.
@@ -1147,6 +1151,7 @@
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1181,7 +1186,8 @@
__ LoadObject(x1, FeedbackVector());
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
+ int vector_index = FeedbackVector()->GetIndex(slot);
+ __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
__ Peek(x10, 0); // Get enumerated object.
@@ -1197,6 +1203,8 @@
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ Bind(&loop);
+ SetExpressionPosition(stmt->each());
+
// Load the current count to x0, load the length to x1.
__ PeekPair(x0, x1, 0);
__ Cmp(x0, x1); // Compare to the array length.
@@ -1266,48 +1274,6 @@
}
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- Comment cmnt(masm_, "[ ForOfStatement");
- SetStatementPosition(stmt);
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // var iterator = iterable[Symbol.iterator]();
- VisitForEffect(stmt->assign_iterator());
-
- // Loop entry.
- __ Bind(loop_statement.continue_label());
-
- // result = iterator.next()
- VisitForEffect(stmt->next_result());
-
- // if (result.done) break;
- Label result_not_done;
- VisitForControl(stmt->result_done(),
- loop_statement.break_label(),
- &result_not_done,
- &result_not_done);
- __ Bind(&result_not_done);
-
- // each = result.value
- VisitForEffect(stmt->assign_each());
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
- __ B(loop_statement.continue_label());
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ Bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new space for
@@ -1350,7 +1316,13 @@
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+ if (FLAG_vector_ics) {
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
+ SmiFromSlot(expr->HomeObjectFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+ }
__ Mov(x10, Operand(isolate()->factory()->undefined_value()));
__ cmp(x0, x10);
@@ -1361,6 +1333,18 @@
}
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+ int offset) {
+ if (NeedsHomeObject(initializer)) {
+ __ Peek(StoreDescriptor::ReceiverRegister(), 0);
+ __ Mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
+ CallStoreIC();
+ }
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1408,7 +1392,7 @@
__ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(proxy->VariableFeedbackSlot()));
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
}
ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
@@ -1493,7 +1477,7 @@
__ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(proxy->VariableFeedbackSlot()));
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
context()->Plug(x0);
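The recurring Smi::FromInt(...Slot()) to SmiFromSlot(...) rewrites in this file track an API change: feedback slots are now a distinct FeedbackVectorSlot type rather than raw ints. A sketch of the assumed shape (the actual definition lives in shared V8 headers, not in this diff):

    // Assumed shape of the wrapper type; illustrative only.
    class FeedbackVectorSlot {
     public:
      explicit FeedbackVectorSlot(int id) : id_(id) {}
      int ToInt() const { return id_; }  // cf. the ToInt() DCHECK later in this file
     private:
      int id_;
    };
    // SmiFromSlot presumably tags slot.ToInt() as a Smi for the IC.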
@@ -1671,6 +1655,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in x0.
@@ -1699,6 +1684,8 @@
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
@@ -1707,6 +1694,14 @@
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+ if (NeedsHomeObject(value)) {
+ __ Mov(StoreDescriptor::ReceiverRegister(), x0);
+ __ Mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ Peek(StoreDescriptor::ValueRegister(), 0);
+ CallStoreIC();
+ }
} else {
VisitForEffect(value);
}
@@ -1718,6 +1713,7 @@
__ Push(x0);
VisitForStackValue(key);
VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1732,7 +1728,7 @@
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(value);
- __ CallRuntime(Runtime::kSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
} else {
VisitForEffect(value);
}
@@ -1755,7 +1751,9 @@
__ Push(x10);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
+ EmitSetHomeObjectIfNeeded(it->second->getter, 2);
EmitAccessor(it->second->setter);
+ EmitSetHomeObjectIfNeeded(it->second->setter, 3);
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1862,16 +1860,8 @@
Comment cmnt(masm_, "[ Assignment");
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1887,6 +1877,30 @@
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = x10;
+ __ Peek(scratch, kPointerSize);
+ __ Push(scratch, result_register());
+ }
+ break;
+ case KEYED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(property->key());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch1 = x10;
+ const Register scratch2 = x11;
+ __ Peek(scratch1, 2 * kPointerSize);
+ __ Peek(scratch2, kPointerSize);
+ __ Push(scratch1, scratch2, result_register());
+ }
+ break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1913,6 +1927,14 @@
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1959,6 +1981,14 @@
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyStore(property);
+ context()->Plug(x0);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyStore(property);
+ context()->Plug(x0);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
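The local three-value LhsKind enums are gone; GetAssignType(property) now classifies the target once, and the new super-property cases ride along. The full classification, spelled out (names taken from the switch cases above; the helper itself lives in shared full-codegen code):

    enum LhsKind {
      VARIABLE,              // x = v
      NAMED_PROPERTY,        // o.x = v
      KEYED_PROPERTY,        // o[k] = v
      NAMED_SUPER_PROPERTY,  // super.x = v
      KEYED_SUPER_PROPERTY   // super[k] = v
    };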
@@ -1974,7 +2004,7 @@
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(prop->PropertyFeedbackSlot()));
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
@@ -1983,15 +2013,12 @@
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(x0);
- VisitForStackValue(super_ref->this_var());
__ Push(key->value());
__ CallRuntime(Runtime::kLoadFromSuper, 3);
}
@@ -1999,11 +2026,11 @@
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
+ // Call keyed load IC. It has arguments key and receiver in x0 and x1.
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(prop->PropertyFeedbackSlot()));
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
CallIC(ic, prop->PropertyFeedbackId());
@@ -2011,6 +2038,14 @@
}
+void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object, key.
+ SetSourcePosition(prop->position());
+
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
@@ -2126,19 +2161,68 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in x0.
+ DCHECK(lit != NULL);
+ __ push(x0);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = x1;
+ __ Ldr(scratch,
+ FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ Peek(scratch, kPointerSize); // constructor
+ } else {
+ __ Peek(scratch, 0); // prototype
+ }
+ __ Push(scratch);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@@ -2159,6 +2243,42 @@
CallStoreIC();
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ __ Push(x0);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ // stack: value, this; x0: home_object
+ Register scratch = x10;
+ Register scratch2 = x11;
+ __ mov(scratch, result_register()); // home_object
+ __ Peek(x0, kPointerSize); // value
+ __ Peek(scratch2, 0); // this
+ __ Poke(scratch2, kPointerSize); // this
+ __ Poke(scratch, 0); // home_object
+ // stack: this, home_object; x0: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ __ Push(x0);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = x10;
+ Register scratch2 = x11;
+ __ Peek(scratch2, 2 * kPointerSize); // value
+ // stack: value, this, home_object; x0: key, x11: value
+ __ Peek(scratch, kPointerSize); // this
+ __ Poke(scratch, 2 * kPointerSize);
+ __ Peek(scratch, 0); // home_object
+ __ Poke(scratch, kPointerSize);
+ __ Poke(x0, 0);
+ __ Move(x0, scratch2);
+ // stack: this, home_object, key; x0: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
case KEYED_PROPERTY: {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2253,8 +2373,9 @@
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
+ } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
- // Non-initializing assignments to consts are ignored.
}
@@ -2277,6 +2398,35 @@
}
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
+ // Assignment to named property of super.
+ // x0 : value
+ // stack : receiver ('this'), home_object
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(key->value());
+ __ Push(x0);
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+ // Assignment to keyed property of super.
+ // x0 : value
+ // stack : receiver ('this'), home_object, key
+ DCHECK(prop != NULL);
+
+ __ Push(x0);
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC.
@@ -2305,16 +2455,27 @@
__ Move(LoadDescriptor::ReceiverRegister(), x0);
EmitNamedPropertyLoad(expr);
} else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0);
} else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), x0);
- __ Pop(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), x0);
+ __ Pop(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForStackValue(expr->key());
+ EmitKeyedSuperPropertyLoad(expr);
+ }
context()->Plug(x0);
}
}
@@ -2379,14 +2540,14 @@
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
- __ Push(scratch, x0);
+ __ Push(x0, scratch);
__ Push(key->value());
// Stack here:
// - home_object
// - this (receiver)
- // - home_object <-- LoadFromSuper will pop here and below.
- // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
// - key
__ CallRuntime(Runtime::kLoadFromSuper, 3);
@@ -2423,6 +2584,43 @@
}
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+
+ // Load the function from the receiver.
+ const Register scratch = x10;
+ SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(x0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(x0);
+ __ Peek(scratch, kPointerSize);
+ __ Push(x0, scratch);
+ VisitForStackValue(prop->key());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ Poke(x0, kPointerSize);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2437,7 +2635,7 @@
Handle<Code> ic = CallIC::initialize_stub(
isolate(), arg_count, call_type);
- __ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot()));
+ __ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2478,6 +2676,14 @@
}
+void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
+ DCHECK(super_ref != NULL);
+ __ ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kGetPrototype, 1);
+}
+
+
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -2517,6 +2723,8 @@
// The runtime call returns a pair of values in x0 (function) and
// x1 (receiver). Touch up the stack with the right values.
__ PokePair(x1, x0, arg_count * kPointerSize);
+
+ PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
}
// Record source position for debugger.
@@ -2552,6 +2760,7 @@
__ Push(context_register(), x10);
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(x0, x1); // Receiver, function.
+ PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@@ -2574,9 +2783,12 @@
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
- // super.x() is handled in EmitCallWithLoadIC.
- if (property->IsSuperAccess() && is_named_call) {
- EmitSuperCallWithLoadIC(expr);
+ if (property->IsSuperAccess()) {
+ if (is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
+ } else {
+ EmitKeyedSuperCallWithLoadIC(expr);
+ }
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
@@ -2588,6 +2800,12 @@
EmitKeyedCallWithLoadIC(expr, property->key());
}
}
+ } else if (call_type == Call::SUPER_CALL) {
+ SuperReference* super_ref = callee->AsSuperReference();
+ EmitLoadSuperConstructor(super_ref);
+ __ Push(result_register());
+ VisitForStackValue(super_ref->this_var());
+ EmitCall(expr, CallICState::METHOD);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -2616,7 +2834,12 @@
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- VisitForStackValue(expr->expression());
+ if (expr->expression()->IsSuperReference()) {
+ EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(expr->expression());
+ }
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2636,12 +2859,12 @@
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot() ==
- expr->CallNewFeedbackSlot() + 1);
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
}
__ LoadObject(x2, FeedbackVector());
- __ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot()));
+ __ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
@@ -2964,6 +3187,32 @@
}
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ Register map = x10;
+ Register type_reg = x11;
+ __ Ldr(map, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
+ __ Cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ls, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3843,7 +4092,7 @@
__ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
+ SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
@@ -3997,17 +4246,8 @@
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4019,18 +4259,52 @@
if (expr->is_postfix() && !context()->IsEffect()) {
__ Push(xzr);
}
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the register.
- VisitForStackValue(prop->obj());
- __ Peek(LoadDescriptor::ReceiverRegister(), 0);
- EmitNamedPropertyLoad(prop);
- } else {
- // KEYED_PROPERTY
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadDescriptor::NameRegister(), 0);
- EmitKeyedPropertyLoad(prop);
+ switch (assign_type) {
+ case NAMED_PROPERTY: {
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
+ EmitNamedPropertyLoad(prop);
+ break;
+ }
+
+ case NAMED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ const Register scratch = x10;
+ __ Peek(scratch, kPointerSize);
+ __ Push(scratch, result_register());
+ EmitNamedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ __ Push(result_register());
+ const Register scratch1 = x10;
+ const Register scratch2 = x11;
+ __ Peek(scratch1, 2 * kPointerSize);
+ __ Peek(scratch2, kPointerSize);
+ __ Push(scratch1, scratch2, result_register());
+ EmitKeyedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_PROPERTY: {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadDescriptor::NameRegister(), 0);
+ EmitKeyedPropertyLoad(prop);
+ break;
+ }
+
+ case VARIABLE:
+ UNREACHABLE();
}
}
@@ -4064,9 +4338,15 @@
case NAMED_PROPERTY:
__ Poke(x0, kPointerSize);
break;
+ case NAMED_SUPER_PROPERTY:
+ __ Poke(x0, kPointerSize * 2);
+ break;
case KEYED_PROPERTY:
__ Poke(x0, kPointerSize * 2);
break;
+ case KEYED_SUPER_PROPERTY:
+ __ Poke(x0, kPointerSize * 3);
+ break;
}
}
}
@@ -4094,9 +4374,15 @@
case NAMED_PROPERTY:
__ Poke(x0, kXRegSize);
break;
+ case NAMED_SUPER_PROPERTY:
+ __ Poke(x0, 2 * kXRegSize);
+ break;
case KEYED_PROPERTY:
__ Poke(x0, 2 * kXRegSize);
break;
+ case KEYED_SUPER_PROPERTY:
+ __ Poke(x0, 3 * kXRegSize);
+ break;
}
}
}
@@ -4154,6 +4440,28 @@
}
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ EmitNamedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ EmitKeyedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
case KEYED_PROPERTY: {
__ Pop(StoreDescriptor::NameRegister());
__ Pop(StoreDescriptor::ReceiverRegister());
@@ -4184,7 +4492,7 @@
__ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(proxy->VariableFeedbackSlot()));
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
}
// Use a regular load, not a contextual load, to avoid a reference
// error.
@@ -4540,7 +4848,7 @@
__ Peek(load_name, 2 * kPointerSize);
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
+ SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
@@ -4560,7 +4868,7 @@
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->DoneFeedbackSlot()));
+ SmiFromSlot(expr->DoneFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
// The ToBooleanStub argument (result.done) is in x0.
@@ -4573,7 +4881,7 @@
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->ValueFeedbackSlot()));
+ SmiFromSlot(expr->ValueFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
context()->DropAndPlug(2, x0); // drop iter and g
@@ -4587,7 +4895,6 @@
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
- Register value_reg = x0;
Register generator_object = x1;
Register the_hole = x2;
Register operand_stack_size = w3;
@@ -4595,21 +4902,12 @@
// The value stays in x0, and is ultimately read by the resumed generator, as
// if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed. r1
+ // is read to throw the value when the resumed generator is already closed. x1
// will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ Pop(generator_object);
- // Check generator state.
- Label wrong_state, closed_state, done;
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
- __ CompareAndBranch(x10, Smi::FromInt(0), eq, &closed_state);
- __ CompareAndBranch(x10, Smi::FromInt(0), lt, &wrong_state);
-
// Load suspended function and context.
__ Ldr(cp, FieldMemOperand(generator_object,
JSGeneratorObject::kContextOffset));
@@ -4635,7 +4933,7 @@
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
- Label resume_frame;
+ Label resume_frame, done;
__ Bl(&resume_frame);
__ B(&done);
@@ -4680,26 +4978,6 @@
// Not reached: the runtime call returns elsewhere.
__ Unreachable();
- // Reach here when generator is closed.
- __ Bind(&closed_state);
- if (resume_mode == JSGeneratorObject::NEXT) {
- // Return completed iterator result when generator is closed.
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10);
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(true);
- } else {
- // Throw the provided value.
- __ Push(value_reg);
- __ CallRuntime(Runtime::kThrow, 1);
- }
- __ B(&done);
-
- // Throw error if we attempt to operate on a running generator.
- __ Bind(&wrong_state);
- __ Push(generator_object);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
__ Bind(&done);
context()->Plug(result_register());
}
@@ -4709,16 +4987,18 @@
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->iterator_result_map());
+ const int instance_size = 5 * kPointerSize;
+ DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+ instance_size);
// Allocate and populate an object with this form: { value: VAL, done: DONE }
Register result = x0;
- __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
+ __ Allocate(instance_size, result, x10, x11, &gc_required, TAG_OBJECT);
__ B(&allocated);
__ Bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
+ __ Push(Smi::FromInt(instance_size));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ Ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
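The handle to the iterator-result map is gone; the size is now a compile-time constant cross-checked by a DCHECK_EQ. The arithmetic behind 5 * kPointerSize, per the STATIC_ASSERTs in the next hunk: a JSObject header of map, properties, and elements, plus the two in-object properties value and done:

    constexpr int kPointerSize = 8;          // arm64
    constexpr int kJSObjectHeaderSlots = 3;  // map, properties, elements
    constexpr int kIteratorResultSlots = 2;  // value, done
    static_assert((kJSObjectHeaderSlots + kIteratorResultSlots) * kPointerSize ==
                      5 * kPointerSize,
                  "an iterator result object is 5 tagged words");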
@@ -4729,11 +5009,13 @@
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
- __ Mov(map_reg, Operand(map));
+ __ Ldr(map_reg, GlobalObjectMemOperand());
+ __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
+ __ Ldr(map_reg,
+ ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(result_value);
__ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
__ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
- DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
JSObject::kElementsOffset);
STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
@@ -4785,7 +5067,7 @@
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
+ if (declaration_scope->is_script_scope() ||
declaration_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index 690c8c2..57eebcc 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -29,6 +29,9 @@
const Register StoreDescriptor::ValueRegister() { return x0; }
+const Register StoreTransitionDescriptor::MapRegister() { return x3; }
+
+
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
@@ -182,6 +185,14 @@
}
+void AllocateHeapNumberDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ Register registers[] = {cp};
+ data->Initialize(arraysize(registers), registers, nullptr);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// cp: context
diff --git a/src/arm64/lithium-arm64.cc b/src/arm64/lithium-arm64.cc
index 502b046..2d5f7f2 100644
--- a/src/arm64/lithium-arm64.cc
+++ b/src/arm64/lithium-arm64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <sstream>
+
#include "src/v8.h"
#include "src/arm64/lithium-codegen-arm64.h"
@@ -282,9 +284,9 @@
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- OStringStream os;
+ std::ostringstream os;
os << hydrogen()->access();
- stream->Add(os.c_str());
+ stream->Add(os.str().c_str());
stream->Add(" <- ");
value()->PrintTo(stream);
}
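
The OStringStream -> std::ostringstream switch leans on a lifetime detail: str() returns a temporary std::string, so the pointer from c_str() is only valid for the duration of the full expression. A standalone sketch of the safe and unsafe patterns:

  #include <cstdio>
  #include <sstream>

  int main() {
    std::ostringstream os;
    os << "field access";
    // Safe: the temporary std::string returned by str() lives until the end
    // of the full expression, so the pointer is valid for the printf call.
    std::printf("%s\n", os.str().c_str());
    // Unsafe variant, for contrast: the pointer would dangle immediately.
    // const char* p = os.str().c_str();
    return 0;
  }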
@@ -1562,9 +1564,17 @@
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
@@ -1673,7 +1683,7 @@
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@@ -1736,7 +1746,7 @@
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@@ -1758,7 +1768,7 @@
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@@ -2234,11 +2244,7 @@
(JSShiftAmountFromHConstant(instr->right()) == 0);
bool can_deopt = false;
if ((op == Token::SHR) && right_can_be_zero) {
- if (FLAG_opt_safe_uint32_operations) {
- can_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- can_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
- }
+ can_deopt = !instr->CheckFlag(HInstruction::kUint32);
}
LInstruction* result;
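
The simplification above drops FLAG_opt_safe_uint32_operations, but the remaining check still captures why a zero-amount SHR can deoptimize: x >>> 0 reinterprets the value as unsigned, and results above INT32_MAX do not fit an untagged int32. A worked example (standalone C++, not V8 code):

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t x = -1;
    // JS: x >>> 0 operates on the unsigned reinterpretation of x.
    uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // 4294967295
    // Too large for a signed 32-bit value, hence can_deopt unless the
    // instruction is known to produce a kUint32 value.
    std::printf("%u fits int32? %d\n", shifted,
                shifted <= static_cast<uint32_t>(INT32_MAX));
    return 0;
  }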
@@ -2402,7 +2408,7 @@
LOperand* temp1 = NULL;
if (instr->access().IsExternalMemory() ||
- instr->field_representation().IsDouble()) {
+ (!FLAG_unbox_double_fields && instr->field_representation().IsDouble())) {
value = UseRegister(instr->value());
} else if (instr->NeedsWriteBarrier()) {
value = UseRegisterAndClobber(instr->value());
diff --git a/src/arm64/lithium-arm64.h b/src/arm64/lithium-arm64.h
index 6ead3fe..424ecba 100644
--- a/src/arm64/lithium-arm64.h
+++ b/src/arm64/lithium-arm64.h
@@ -178,17 +178,13 @@
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const FINAL OVERRIDE { \
- return LInstruction::k##type; \
- } \
- virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
- virtual const char* Mnemonic() const FINAL OVERRIDE { \
- return mnemonic; \
- } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ Opcode opcode() const FINAL { return LInstruction::k##type; } \
+ void CompileToNative(LCodeGen* generator) FINAL; \
+ const char* Mnemonic() const FINAL { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ DCHECK(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
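
This blanket rewrite relies on a C++11 detail worth spelling out: FINAL (final) is only legal on a virtual member function, and without an explicit virtual keyword a function can only be virtual by overriding, so the dropped virtual/OVERRIDE keywords were redundant. A minimal sketch with plain keywords instead of V8's macros:

  #include <cstdio>

  struct Base {
    virtual ~Base() {}
    virtual int Count() = 0;
  };

  struct Derived : Base {
    // Same effect as "virtual int Count() OVERRIDE FINAL": the override is
    // still compiler-checked, because "final" requires a virtual function
    // and the only way this one is virtual is by overriding.
    int Count() final { return 1; }
  };

  int main() {
    Derived d;
    std::printf("%d\n", d.Count());
    return 0;
  }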
@@ -295,11 +291,9 @@
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const FINAL OVERRIDE {
- return (R != 0) && (result() != NULL);
- }
+ bool HasResult() const FINAL { return (R != 0) && (result() != NULL); }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const { return results_[0]; }
+ LOperand* result() const OVERRIDE { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -317,28 +311,32 @@
private:
// Iterator support.
- virtual int InputCount() FINAL OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+ int InputCount() FINAL { return I; }
+ LOperand* InputAt(int i) FINAL { return inputs_[i]; }
- virtual int TempCount() FINAL OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+ int TempCount() FINAL { return T; }
+ LOperand* TempAt(int i) FINAL { return temps_[i]; }
};
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
@@ -348,9 +346,7 @@
class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
- return false;
- }
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
@@ -360,7 +356,7 @@
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const FINAL OVERRIDE { return true; }
+ bool IsControl() const FINAL { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -410,8 +406,8 @@
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ bool IsGap() const OVERRIDE { return true; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -451,7 +447,7 @@
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -492,10 +488,10 @@
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
- virtual bool IsControl() const OVERRIDE { return true; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+ bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -525,12 +521,10 @@
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
- return false;
- }
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -550,9 +544,7 @@
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
- return false;
- }
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
@@ -573,7 +565,7 @@
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -721,11 +713,9 @@
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const OVERRIDE {
- return LInstruction::kArithmeticD;
- }
- virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
- virtual const char* Mnemonic() const OVERRIDE;
+ Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+ void CompileToNative(LCodeGen* generator) OVERRIDE;
+ const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
@@ -749,11 +739,9 @@
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const OVERRIDE {
- return LInstruction::kArithmeticT;
- }
- virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
- virtual const char* Mnemonic() const OVERRIDE;
+ Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+ void CompileToNative(LCodeGen* generator) OVERRIDE;
+ const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
@@ -838,7 +826,7 @@
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -853,7 +841,7 @@
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -889,7 +877,7 @@
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -908,7 +896,7 @@
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -925,7 +913,7 @@
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+ bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -1097,7 +1085,7 @@
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1215,7 +1203,7 @@
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1313,7 +1301,7 @@
class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const OVERRIDE { return true; }
+ bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -1447,7 +1435,7 @@
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1465,7 +1453,7 @@
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1479,7 +1467,7 @@
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1555,11 +1543,12 @@
CallInterfaceDescriptor descriptor() { return descriptor_; }
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1567,11 +1556,11 @@
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+ int InputCount() FINAL { return inputs_.length(); }
+ LOperand* InputAt(int i) FINAL { return inputs_[i]; }
- virtual int TempCount() FINAL OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+ int TempCount() FINAL { return 0; }
+ LOperand* TempAt(int i) FINAL { return NULL; }
};
@@ -1588,7 +1577,7 @@
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1624,7 +1613,7 @@
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1641,7 +1630,7 @@
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1656,7 +1645,7 @@
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1674,7 +1663,7 @@
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -1691,7 +1680,7 @@
int slot_index() const { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2272,7 +2261,7 @@
class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
@@ -2331,11 +2320,11 @@
private:
// Iterator support.
- virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+ int InputCount() FINAL { return inputs_.length(); }
+ LOperand* InputAt(int i) FINAL { return inputs_[i]; }
- virtual int TempCount() FINAL OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+ int TempCount() FINAL { return 0; }
+ LOperand* TempAt(int i) FINAL { return NULL; }
};
@@ -2585,7 +2574,7 @@
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2609,7 +2598,7 @@
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2632,7 +2621,7 @@
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@@ -2707,7 +2696,7 @@
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2786,7 +2775,7 @@
LOperand* code_object() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -2810,7 +2799,7 @@
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2916,7 +2905,7 @@
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2991,7 +2980,7 @@
Handle<String> type_literal() const { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index 2998642..df9e7b5 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -557,11 +557,6 @@
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
-
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
@@ -1063,9 +1058,8 @@
}
-void LCodeGen::Deoptimize(LInstruction* instr,
- Deoptimizer::BailoutType* override_bailout_type,
- const char* detail) {
+void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
+ Deoptimizer::BailoutType* override_bailout_type) {
DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}
@@ -1516,7 +1510,7 @@
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1530,7 +1524,7 @@
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1656,7 +1650,7 @@
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr);
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1838,7 +1832,7 @@
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr);
+ DeoptimizeIf(cond, instr, "out of bounds");
}
}
@@ -1917,7 +1911,7 @@
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr);
+ DeoptimizeIfSmi(value, instr, "Smi");
}
Register map = NoReg;
@@ -1978,7 +1972,7 @@
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr);
+ Deoptimize(instr, "unexpected object");
}
}
}
@@ -2048,23 +2042,33 @@
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));
-
- Register scratch = x3;
- Register extra = x4;
- Register extra2 = x5;
- Register extra3 = x6;
+ Register scratch = x4;
+ Register extra = x5;
+ Register extra2 = x6;
+ Register extra3 = x7;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
+ scratch, extra, extra2, extra3));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
@@ -2072,25 +2076,44 @@
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- // TODO(all): on ARM we use a call descriptor to specify a storage mode
- // but on ARM64 we only have one storage mode so it isn't necessary. Check
- // this understanding is correct.
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
+
after_push_argument_ = false;
}
@@ -2163,7 +2186,7 @@
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr);
+ DeoptimizeIfSmi(temp, instr, "instance migration failed");
}
@@ -2218,7 +2241,7 @@
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ Bind(&success);
@@ -2227,7 +2250,7 @@
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr);
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
}
}
@@ -2235,7 +2258,7 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr);
+ DeoptimizeIfNotSmi(value, instr, "not a Smi");
}
@@ -2253,15 +2276,15 @@
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr);
+ DeoptimizeIf(lo, instr, "wrong instance type");
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr);
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
} else {
uint8_t mask;
@@ -2271,9 +2294,11 @@
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
- DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr);
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
+ "wrong instance type");
} else {
- DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr);
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
+ "wrong instance type");
}
} else {
if (tag == 0) {
@@ -2282,7 +2307,7 @@
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
@@ -2321,7 +2346,8 @@
__ JumpIfHeapNumber(input, &is_heap_number);
// Check for undefined. Undefined is converted to zero for clamping conversion.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined");
__ Mov(result, 0);
__ B(&done);
@@ -2626,7 +2652,7 @@
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "value mismatch");
}
@@ -2650,9 +2676,9 @@
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr);
+ DeoptimizeIfSmi(object, instr, "Smi");
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2688,7 +2714,7 @@
type = Deoptimizer::LAZY;
}
- Deoptimize(instr, &type, instr->hydrogen()->reason());
+ Deoptimize(instr, instr->hydrogen()->reason(), &type);
}
@@ -2702,21 +2728,21 @@
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr);
+ DeoptimizeIfZero(dividend, instr, "division by zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "lost precision");
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -2744,14 +2770,14 @@
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr);
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr);
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2763,7 +2789,7 @@
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr);
+ DeoptimizeIfNotZero(temp, instr, "lost precision");
}
}
@@ -2786,7 +2812,7 @@
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2798,7 +2824,7 @@
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, i.e. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
@@ -2810,13 +2836,13 @@
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "overflow");
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr);
+ DeoptimizeIfNotZero(remainder, instr, "lost precision");
}
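
The Sdiv/Msub pair above is the exactness check for optimized integer division: Msub recomputes remainder = dividend - result * divisor, and any non-zero remainder means the true JS result is fractional. The same arithmetic standalone:

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t dividend = 7, divisor = 2;
    int32_t result = dividend / divisor;              // Sdiv: 3 (truncating)
    int32_t remainder = dividend - result * divisor;  // Msub: 1
    // Non-zero remainder: 7 / 2 is 3.5 in JS, not representable as int32,
    // so the compiled code deopts with "lost precision".
    std::printf("remainder = %d -> %s\n", remainder,
                remainder ? "deopt" : "stay on int32 fast path");
    return 0;
  }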
@@ -2825,11 +2851,11 @@
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr);
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2890,7 +2916,7 @@
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr);
+ DeoptimizeIfZero(result, instr, "no cache");
__ Bind(&done);
}
@@ -2903,17 +2929,17 @@
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr);
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "null");
- DeoptimizeIfSmi(object, instr);
+ DeoptimizeIfSmi(object, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr);
+ DeoptimizeIf(le, instr, "not a JavaScript object");
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2927,7 +2953,7 @@
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr);
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
__ Bind(&use_cache);
}
@@ -3320,7 +3346,7 @@
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3341,7 +3367,7 @@
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3361,7 +3387,7 @@
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
@@ -3369,13 +3395,18 @@
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(x0));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Mov(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ Mov(slot_register, Smi::FromInt(index));
}
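
The slot register now carries a flat index into the feedback vector (Smi-tagged) instead of the raw slot. A toy model of the conversion; the class and header size are illustrative assumptions, not V8's real TypeFeedbackVector:

  #include <cstdio>

  struct FeedbackVectorModel {
    static const int kHeaderSize = 3;  // assumed header length
    int GetIndex(int ic_slot) const { return kHeaderSize + ic_slot; }
  };

  int main() {
    FeedbackVectorModel vector;
    int slot = 2;
    int index = vector.GetIndex(slot);  // what Smi::FromInt() encodes above
    std::printf("slot %d -> vector index %d\n", slot, index);
    return 0;
  }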
@@ -3389,7 +3420,7 @@
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3492,7 +3523,7 @@
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
@@ -3589,7 +3620,7 @@
STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
__ Ldr(scratch, mem_op);
__ Cmn(scratch, 1);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "hole");
}
}
@@ -3627,9 +3658,9 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr);
+ DeoptimizeIfNotSmi(result, instr, "not a Smi");
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
}
@@ -3643,7 +3674,7 @@
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3662,6 +3693,7 @@
}
if (instr->hydrogen()->representation().IsDouble()) {
+ DCHECK(access.IsInobject());
FPRegister result = ToDoubleRegister(instr->result());
__ Ldr(result, FieldMemOperand(object, offset));
return;
@@ -3699,7 +3731,8 @@
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -3731,7 +3764,7 @@
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -3883,7 +3916,7 @@
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr);
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ Fcvtms(result, input);
@@ -3893,7 +3926,7 @@
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
}
@@ -3919,13 +3952,13 @@
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
@@ -3948,14 +3981,14 @@
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr);
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr);
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -3998,14 +4031,14 @@
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "overflow");
}
// Check for (0 / -x) that will produce negative zero.
@@ -4015,7 +4048,7 @@
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "minus zero");
}
Label done;
@@ -4174,18 +4207,18 @@
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr);
+ DeoptimizeIf(hi, instr, "overflow");
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr);
+ DeoptimizeIfNegative(result, instr, "minus zero");
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "NaN");
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4263,7 +4296,7 @@
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ B(&done);
}
@@ -4282,7 +4315,7 @@
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr);
+ Deoptimize(instr, "division by zero");
return;
}
@@ -4296,7 +4329,7 @@
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr);
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -4311,12 +4344,12 @@
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr);
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
}
__ Bind(&done);
}
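
The minus-zero handling above exists because JavaScript's % can produce -0, which an untagged int32 cannot represent. A standalone illustration of the exact condition being guarded:

  #include <cstdio>

  int main() {
    int dividend = -8, divisor = 4;
    int result = dividend % divisor;  // 0 as int32, but -0 in JS
    // Mirrors the code above: Cbnz skips the check when result != 0; only a
    // zero result from a negative dividend takes the "minus zero" deopt.
    if (result == 0 && dividend < 0) {
      std::printf("would deopt: JS result is -0\n");
    }
    return 0;
  }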
@@ -4339,10 +4372,10 @@
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr);
+ DeoptimizeIfZero(left, instr, "minus zero");
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr);
+ DeoptimizeIfNegative(left, instr, "minus zero");
}
}
@@ -4352,7 +4385,7 @@
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, left);
}
@@ -4368,7 +4401,7 @@
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, left);
}
@@ -4387,7 +4420,7 @@
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr);
+ DeoptimizeIf(lt, instr, "overflow");
}
if (right >= 0) {
@@ -4397,7 +4430,7 @@
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4455,13 +4488,13 @@
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr);
+ DeoptimizeIf(mi, instr, "minus zero");
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
__ Mul(result, left, right);
}
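
The Smull/Cmp sequence above detects 32-bit overflow by comparing the full 64-bit product against its own sign-extended low half. The same check in standalone C++:

  #include <cstdint>
  #include <cstdio>

  bool MulOverflows32(int32_t left, int32_t right) {
    int64_t product = static_cast<int64_t>(left) * right;  // Smull
    // Fits in int32 iff sign-extending the low 32 bits reproduces the value,
    // i.e. Cmp(result.X(), Operand(result, SXTW)) sets eq.
    return product != static_cast<int32_t>(product);
  }

  int main() {
    std::printf("%d\n", MulOverflows32(1 << 20, 1 << 20));  // 1: 2^40 overflows
    std::printf("%d\n", MulOverflows32(123, -456));         // 0: fits
    return 0;
  }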
@@ -4485,7 +4518,7 @@
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr);
+ DeoptimizeIf(mi, instr, "minus zero");
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4493,7 +4526,7 @@
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4669,13 +4702,14 @@
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr);
+ DeoptimizeIfMinusZero(result, instr, "minus zero");
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4766,6 +4800,7 @@
int parameter_count = ToInteger32(instr->constant_parameter_count());
__ Drop(parameter_count + 1);
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register parameter_count = ToRegister(instr->parameter_count());
__ DropBySMI(parameter_count);
}
@@ -4868,7 +4903,7 @@
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr);
+ DeoptimizeIfNegative(input.W(), instr, "overflow");
}
__ SmiTag(output, input);
}
@@ -4880,7 +4915,7 @@
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr);
+ DeoptimizeIfNotSmi(input, instr, "not a Smi");
}
__ Bind(&untag);
@@ -4905,7 +4940,7 @@
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr);
+ DeoptimizeIfNegative(result, instr, "negative value");
}
break;
default: UNREACHABLE();
@@ -4915,7 +4950,7 @@
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr);
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -4968,7 +5003,7 @@
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr);
+ DeoptimizeIfNegative(result, instr, "negative value");
}
break;
default: UNREACHABLE();
@@ -4978,7 +5013,7 @@
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr);
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left);
} else {
@@ -5017,7 +5052,6 @@
Register scratch2 = x6;
DCHECK(instr->IsMarkedAsCall());
- ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
// TODO(all): if Mov could handle object in new space then it could be used
// here.
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
@@ -5107,7 +5141,7 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr);
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -5145,7 +5179,7 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr);
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
}
// Store the value.
@@ -5349,7 +5383,7 @@
__ AssertNotSmi(object);
- if (representation.IsDouble()) {
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DCHECK(!instr->hydrogen()->has_transition());
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
@@ -5358,8 +5392,6 @@
return;
}
- Register value = ToRegister(instr->value());
-
DCHECK(!representation.IsSmi() ||
!instr->value()->IsConstantOperand() ||
IsInteger32Constant(LConstantOperand::cast(instr->value())));
@@ -5391,8 +5423,12 @@
destination = temp0;
}
- if (representation.IsSmi() &&
- instr->hydrogen()->value()->representation().IsInteger32()) {
+ if (FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ FPRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, FieldMemOperand(object, offset));
+ } else if (representation.IsSmi() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
Register temp0 = ToRegister(instr->temp0());
@@ -5407,12 +5443,15 @@
#endif
STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
STATIC_ASSERT(kSmiTag == 0);
+ Register value = ToRegister(instr->value());
__ Store(value, UntagSmiFieldMemOperand(destination, offset),
Representation::Integer32());
} else {
+ Register value = ToRegister(instr->value());
__ Store(value, FieldMemOperand(destination, offset), representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Register value = ToRegister(instr->value());
__ RecordWriteField(destination,
offset,
value, // Clobbered.
@@ -5562,7 +5601,7 @@
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5576,7 +5615,7 @@
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5616,7 +5655,8 @@
// Output contains zero, undefined is converted to zero for truncating
// conversions.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined/true/false");
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5774,7 +5814,7 @@
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr);
+ DeoptimizeIf(eq, instr, "memento found");
__ Bind(&no_memento_found);
}
@@ -5899,7 +5939,7 @@
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr);
+ DeoptimizeIf(ne, instr, "wrong map");
}
@@ -5933,10 +5973,10 @@
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr);
+ DeoptimizeIfSmi(receiver, instr, "Smi");
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr);
+ Deoptimize(instr, "not a JavaScript object");
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
@@ -5979,10 +6019,11 @@
object_(object),
index_(index) {
}
- virtual void Generate() OVERRIDE {
+ void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() OVERRIDE { return instr_; }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
private:
LLoadFieldByIndex* instr_;
Register result_;
diff --git a/src/arm64/lithium-codegen-arm64.h b/src/arm64/lithium-codegen-arm64.h
index a141dfa..a73bb8c 100644
--- a/src/arm64/lithium-codegen-arm64.h
+++ b/src/arm64/lithium-codegen-arm64.h
@@ -217,32 +217,27 @@
BranchType branch_type, Register reg = NoReg,
int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LInstruction* instr,
- Deoptimizer::BailoutType* override_bailout_type = NULL,
- const char* detail = NULL);
- void DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* detail = NULL);
- void DeoptimizeIfZero(Register rt, LInstruction* instr,
- const char* detail = NULL);
+ void Deoptimize(LInstruction* instr, const char* detail,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
+ void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- const char* detail = NULL);
+ const char* detail);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
- const char* detail = NULL);
- void DeoptimizeIfSmi(Register rt, LInstruction* instr,
- const char* detail = NULL);
- void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- const char* detail = NULL);
+ const char* detail);
+ void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail = NULL);
+ LInstruction* instr, const char* detail);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail = NULL);
+ LInstruction* instr, const char* detail);
void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- const char* detail = NULL);
+ const char* detail);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- const char* detail = NULL);
+ const char* detail);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- const char* detail = NULL);
+ const char* detail);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index 23767e4..b691e21 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -825,6 +825,12 @@
}
+void MacroAssembler::Frintp(const FPRegister& fd, const FPRegister& fn) {
+ DCHECK(allow_macro_instructions_);
+ frintp(fd, fn);
+}
+
+
void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintz(fd, fn);
@@ -1120,6 +1126,14 @@
}
+void MacroAssembler::Umull(const Register& rd, const Register& rn,
+ const Register& rm) {
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ umaddl(rd, rn, rm, xzr);
+}
+
+
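
The new Umull macro lowers to umaddl with the zero register as the addend: rd = ZeroExtend32(rn) * ZeroExtend32(rm) + xzr, which is exactly an unsigned 32x32->64 multiply. In plain arithmetic:

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t rn = 0xFFFFFFFFu, rm = 2;
    // umaddl rd, rn, rm, xzr: multiply-accumulate with a zero addend.
    uint64_t rd = static_cast<uint64_t>(rn) * rm + 0;
    std::printf("0x%llx\n", static_cast<unsigned long long>(rd));  // 0x1fffffffe
    return 0;
  }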
void MacroAssembler::Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst) {
@@ -1230,14 +1244,7 @@
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(sp_));
if (!TmpList()->IsEmpty()) {
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Sub(temp, StackPointer(), space);
- Bic(csp, temp, 0xf);
- } else {
- Sub(csp, StackPointer(), space);
- }
+ Sub(csp, StackPointer(), space);
} else {
// TODO(jbramley): Several callers rely on this not using scratch
// registers, so we use the assembler directly here. However, this means
@@ -1274,11 +1281,7 @@
DCHECK(emit_debug_code());
DCHECK(!csp.Is(sp_));
{ InstructionAccurateScope scope(this);
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- bic(csp, StackPointer(), 0xf);
- } else {
- mov(csp, StackPointer());
- }
+ mov(csp, StackPointer());
}
AssertStackConsistency();
}
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index f670403..0253e7c 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -13,7 +13,7 @@
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -1308,7 +1308,7 @@
// Avoid emitting code when !use_real_abort() since non-real aborts cause too
// much code to be generated.
if (emit_debug_code() && use_real_aborts()) {
- if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ if (csp.Is(StackPointer())) {
// Always check the alignment of csp when it is the stack pointer. We
// can't check the alignment of csp without using a scratch register (or
// clobbering the flags), but the processor (or simulator) will abort if
@@ -3064,6 +3064,13 @@
}
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on arm64.
+ UNREACHABLE();
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
@@ -3781,23 +3788,38 @@
}
-void MacroAssembler::DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+ Register scratch2, Handle<WeakCell> cell,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
- Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- Cmp(scratch, Operand(map));
+ Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CmpWeakValue(scratch1, cell, scratch2);
B(ne, &fail);
Jump(success, RelocInfo::CODE_TARGET);
Bind(&fail);
}
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+ Register scratch) {
+ Mov(scratch, Operand(cell));
+ Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
+ Cmp(value, scratch);
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ Mov(value, Operand(cell));
+ Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
+ JumpIfSmi(value, miss);
+}
+
+
void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
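Taken together, the weak-cell helpers above let generated code guard on a map
without keeping it strongly alive: the WeakCell holds the map weakly, and once
the GC clears the cell its value no longer compares equal. A hypothetical
caller, with illustrative register and label names:

  // Guard a fast path on a weakly-held expected map.
  Ldr(map_reg, FieldMemOperand(receiver, HeapObject::kMapOffset));
  CmpWeakValue(map_reg, expected_map_cell, scratch);
  B(ne, &miss);  // wrong map, or the cell was cleared by GC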
@@ -4080,7 +4102,7 @@
// Check the context is a native context.
if (emit_debug_code()) {
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
Check(eq, kExpectedNativeContext);
@@ -4206,10 +4228,11 @@
}
Bind(&done);
- // Check that the value is a normal property.
+ // Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ DCHECK_EQ(FIELD, 0);
TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
// Get the value at the masked, scaled index and return.
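The DCHECK above records why a single masked test suffices: FIELD is the zero
value of the property-type encoding, so any set bit under
PropertyDetails::TypeField::kMask means the property is not a field:

  // (details & TypeField::kMask) == 0  <=>  TypeField::decode(details) == FIELD,
  // so TestAndBranchIfAnySet branches to miss exactly for non-field properties.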
@@ -4626,17 +4649,6 @@
}
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
- Register scratch,
- Label* if_deprecated) {
- if (map->CanBeDeprecated()) {
- Mov(scratch, Operand(map));
- Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
- TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
- }
-}
-
-
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 7a106a1..ee43589 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -422,6 +422,7 @@
inline void Frinta(const FPRegister& fd, const FPRegister& fn);
inline void Frintm(const FPRegister& fd, const FPRegister& fn);
inline void Frintn(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintp(const FPRegister& fd, const FPRegister& fn);
inline void Frintz(const FPRegister& fd, const FPRegister& fn);
inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
inline void Fsub(const FPRegister& fd,
@@ -489,6 +490,7 @@
inline void Smulh(const Register& rd,
const Register& rn,
const Register& rm);
+ inline void Umull(const Register& rd, const Register& rn, const Register& rm);
inline void Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst);
@@ -759,9 +761,9 @@
// it can be evidence of a potential bug because the ABI forbids accesses
// below csp.
//
- // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
- // enabled, then csp will be dereferenced to cause the processor
- // (or simulator) to abort if it is not properly aligned.
+ // If StackPointer() is the system stack pointer (csp), then csp will be
+ // dereferenced to cause the processor (or simulator) to abort if it is not
+ // properly aligned.
//
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
@@ -829,9 +831,7 @@
inline void BumpSystemStackPointer(const Operand& space);
// Re-synchronizes the system stack pointer (csp) with the current stack
- // pointer (according to StackPointer()). This function will ensure the
- // new value of the system stack pointer is remains aligned to 16 bytes, and
- // is lower than or equal to the value of the current stack pointer.
+ // pointer (according to StackPointer()).
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
@@ -1478,14 +1478,19 @@
Label* fail,
SmiCheckType smi_check_type);
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
+ // Check if the map of an object is equal to a specified weak map and branch
+ // to a specified target if equal. Skip the smi check if not required
+ // (object is known to be a heap object)
+ void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+ Handle<WeakCell> cell, Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Compare the given value and the value of weak cell.
+ void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+
+ // Load the value of the weak cell in the value register. Branch to the given
+ // miss label if the weak cell was cleared.
+ void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Test the bitfield of the heap object map with mask and set the condition
// flags. The object register is preserved.
@@ -1627,6 +1632,7 @@
// Activation support.
void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
// Returns map with validated enum cache in object register.
@@ -1774,10 +1780,6 @@
int mask,
Label* if_all_clear);
- void CheckMapDeprecated(Handle<Map> map,
- Register scratch,
- Label* if_deprecated);
-
// Check if object is in new space and jump accordingly.
// Register 'object' is preserved.
void JumpIfNotInNewSpace(Register object,
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index 129252b..bc524af 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -413,7 +413,7 @@
// Reset debug helpers.
breakpoints_.clear();
- break_on_next_= false;
+ break_on_next_ = false;
}
@@ -2463,6 +2463,12 @@
set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break;
case FRINTM_d:
set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break;
+ case FRINTP_s:
+ set_sreg(fd, FPRoundInt(sreg(fn), FPPositiveInfinity));
+ break;
+ case FRINTP_d:
+ set_dreg(fd, FPRoundInt(dreg(fn), FPPositiveInfinity));
+ break;
case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
@@ -2767,6 +2773,10 @@
// We always use floor(value).
break;
}
+ case FPPositiveInfinity: {
+ int_result = ceil(value);
+ break;
+ }
default: UNIMPLEMENTED();
}
return int_result;
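The new FPPositiveInfinity case mirrors the FPNegativeInfinity case above:
rounding toward +infinity is ceil, just as rounding toward -infinity is floor.
A host-side C++ check of the convention (illustrative only):

  #include <cmath>
  #include <cstdio>

  int main() {
    // FRINTP semantics: round toward +infinity.
    std::printf("%.1f\n", std::ceil(1.2));   // 2.0
    std::printf("%.1f\n", std::ceil(-1.2));  // -1.0 (toward +inf, not away from zero)
    return 0;
  }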