Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f32f407..0af8f93 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -78,7 +78,8 @@
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
RecordRelocInfo(rmode);
- emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
+ emitl(static_cast<uint32_t>(
+ entry - isolate()->heap()->memory_allocator()->code_range()->start()));
}
@@ -299,7 +300,8 @@
Address Assembler::runtime_entry_at(Address pc) {
- return Memory::int32_at(pc) + isolate()->code_range()->start();
+ return Memory::int32_at(pc) +
+ isolate()->heap()->memory_allocator()->code_range()->start();
}
// -----------------------------------------------------------------------------
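
For reference, the delta encoding used by the two hunks above, as a standalone
sketch (addresses are made up): a runtime entry is stored as a 32-bit offset
from the code range base, so reading it back must re-add the same base.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t code_range_start = 0x00007f2a10000000ULL;  // hypothetical base
  uint64_t entry = code_range_start + 0x1234;         // runtime entry address

  // emit_runtime_entry: store a 32-bit delta from the base.
  uint32_t stored = static_cast<uint32_t>(entry - code_range_start);
  // runtime_entry_at: re-add the base to recover the full address.
  uint64_t decoded = code_range_start + stored;

  assert(decoded == entry);
  return 0;
}
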
@@ -326,11 +328,6 @@
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -368,21 +365,6 @@
}
}
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Memory::Address_at(pc_) = updated_reference;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
- }
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(pc_);
@@ -538,7 +520,7 @@
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 214b786..5f8fb68 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -114,6 +114,45 @@
CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
}
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+ if (IsWasmMemoryReference(rmode_)) {
+ Address updated_reference;
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Memory::Address_at(pc_) = updated_reference;
+ } else if (IsWasmMemorySizeReference(rmode_)) {
+ uint32_t updated_size_reference;
+ DCHECK(wasm_memory_size_reference() <= old_size);
+ updated_size_reference =
+ new_size + (wasm_memory_size_reference() - old_size);
+ DCHECK(updated_size_reference <= new_size);
+ Memory::uint32_at(pc_) = updated_size_reference;
+ } else {
+ UNREACHABLE();
+ }
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+ }
+}
// -----------------------------------------------------------------------------
// Implementation of Operand
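
The patching above is plain rebasing arithmetic. A minimal sketch of both
cases, outside V8's RelocInfo machinery (values are made up):

#include <cassert>
#include <cstdint>

// Rebase an absolute reference into the wasm heap when the heap moves.
uint64_t RebaseReference(uint64_t ref, uint64_t old_base, uint64_t new_base) {
  return new_base + (ref - old_base);
}

// Adjust an embedded size constant when the heap is resized.
uint32_t RebaseSize(uint32_t size_ref, uint32_t old_size, uint32_t new_size) {
  return new_size + (size_ref - old_size);
}

int main() {
  assert(RebaseReference(0x1000 + 0x40, 0x1000, 0x8000) == 0x8040);
  assert(RebaseSize(64, 64, 128) == 128);
  return 0;
}
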
@@ -563,14 +602,17 @@
if (is_int8(src.value_)) {
emit(0x83);
emit_modrm(subcode, dst);
+ if (!RelocInfo::IsNone(src.rmode_)) {
+ RecordRelocInfo(src.rmode_);
+ }
emit(src.value_);
} else if (dst.is(rax)) {
emit(0x05 | (subcode << 3));
- emitl(src.value_);
+ emit(src);
} else {
emit(0x81);
emit_modrm(subcode, dst);
- emitl(src.value_);
+ emit(src);
}
}
@@ -583,11 +625,14 @@
if (is_int8(src.value_)) {
emit(0x83);
emit_operand(subcode, dst);
+ if (!RelocInfo::IsNone(src.rmode_)) {
+ RecordRelocInfo(src.rmode_);
+ }
emit(src.value_);
} else {
emit(0x81);
emit_operand(subcode, dst);
- emitl(src.value_);
+ emit(src);
}
}
@@ -1484,7 +1529,6 @@
movq(dst, static_cast<int64_t>(value), rmode);
}
-
// Loads the ip-relative location of the src label into the target location
// (as a 32-bit offset sign extended to 64-bit).
void Assembler::movl(const Operand& dst, Label* src) {
@@ -1909,6 +1953,25 @@
emit_modrm(src, dst);
}
+void Assembler::xchgb(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, op);
+ } else {
+ emit_optional_rex_32(reg, op);
+ }
+ emit(0x86);
+ emit_operand(reg, op);
+}
+
+void Assembler::xchgw(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg, op);
+ emit(0x87);
+ emit_operand(reg, op);
+}
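
The is_byte_register() test above exists because the codes for spl/bpl/sil/dil
only select those low-byte registers when a REX prefix is present; without
REX, the same codes mean ah/ch/dh/bh. A toy encoder for the reg, [base] form
(assumes mod=00 with no SIB byte or displacement, i.e. base is not rsp/rbp):

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> EncodeXchgb(int reg, int base) {
  std::vector<uint8_t> out;
  bool is_byte_register = reg <= 3;  // al, cl, dl, bl need no REX
  if (!is_byte_register || reg >= 8 || base >= 8) {
    uint8_t rex = 0x40;
    rex |= ((reg >> 3) & 1) << 2;  // REX.R extends the reg field
    rex |= (base >> 3) & 1;        // REX.B extends the base field
    out.push_back(rex);
  }
  out.push_back(0x86);  // xchg r/m8, r8
  out.push_back(static_cast<uint8_t>((reg & 7) << 3 | (base & 7)));
  return out;
}

int main() {
  for (uint8_t b : EncodeXchgb(6 /* sil */, 3 /* rbx */)) printf("%02x ", b);
  printf("\n");  // 40 86 33 -- without the REX byte this would mean dh, [rbx]
  return 0;
}
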
void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
@@ -2032,14 +2095,14 @@
emit(0x66);
if (reg.is(rax)) {
emit(0xA9);
- emit(mask.value_);
+ emitw(mask.value_);
} else {
if (reg.low_bits() == 4) {
emit_rex_32(reg);
}
emit(0xF7);
emit_modrm(0x0, reg);
- emit(mask.value_);
+ emitw(mask.value_);
}
}
@@ -2050,7 +2113,7 @@
emit_optional_rex_32(rax, op);
emit(0xF7);
emit_operand(rax, op);
- emit(mask.value_);
+ emitw(mask.value_);
}
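
The emitw fixes in this hunk matter because these test encodings take a 16-bit
immediate; emitting the mask through the byte-sized emit() appears to have
written only one byte, desynchronizing the instruction stream. A toy version
of the little-endian imm16 emission:

#include <cstdint>
#include <cstdio>

void emitw(uint8_t* buf, int* pos, uint16_t v) {
  buf[(*pos)++] = static_cast<uint8_t>(v & 0xff);  // low byte first
  buf[(*pos)++] = static_cast<uint8_t>(v >> 8);    // then high byte
}

int main() {
  uint8_t buf[2];
  int pos = 0;
  emitw(buf, &pos, 0xBEEF);
  printf("%02x %02x\n", buf[0], buf[1]);  // ef be
  return 0;
}
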
void Assembler::testw(const Operand& op, Register reg) {
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e48f358..77a1a57 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -183,6 +183,8 @@
V(xmm14) \
V(xmm15)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(xmm1) \
V(xmm2) \
@@ -200,8 +202,7 @@
V(xmm14) \
V(xmm15)
-
-struct DoubleRegister {
+struct XMMRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -212,15 +213,15 @@
static const int kMaxNumRegisters = Code::kAfterLast;
- static DoubleRegister from_code(int code) {
- DoubleRegister result = {code};
+ static XMMRegister from_code(int code) {
+ XMMRegister result = {code};
return result;
}
const char* ToString();
bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
@@ -238,6 +239,11 @@
int reg_code;
};
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
@@ -245,11 +251,6 @@
#undef DECLARE_REGISTER
const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-
-typedef DoubleRegister XMMRegister;
-
-typedef DoubleRegister Simd128Register;
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -334,6 +335,8 @@
class Immediate BASE_EMBEDDED {
public:
explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(int32_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode) {}
explicit Immediate(Smi* value) {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
@@ -341,6 +344,7 @@
private:
int32_t value_;
+ RelocInfo::Mode rmode_ = RelocInfo::NONE32;
friend class Assembler;
};
@@ -784,6 +788,9 @@
void decb(Register dst);
void decb(const Operand& dst);
+ void xchgb(Register reg, const Operand& op);
+ void xchgw(Register reg, const Operand& op);
+
// Sign-extends rax into rdx:rax.
void cqo();
// Sign-extends eax into edx:eax.
@@ -1689,7 +1696,7 @@
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(const int reason, int raw_position, int id);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
@@ -1755,7 +1762,12 @@
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
- void emit(Immediate x) { emitl(x.value_); }
+ void emit(Immediate x) {
+ if (!RelocInfo::IsNone(x.rmode_)) {
+ RecordRelocInfo(x.rmode_);
+ }
+ emitl(x.value_);
+ }
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
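
A toy model of the reloc-aware emit(Immediate) above: the reloc entry must be
recorded at the current pc, immediately before the four immediate bytes are
written, so that the recorded position points at the bytes a later pass will
patch. (Toy buffer, not the real Assembler API.)

#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyAssembler {
  std::vector<uint8_t> buf;
  std::vector<size_t> reloc_positions;  // offsets of patchable imm32s

  void emit8(uint8_t b) { buf.push_back(b); }
  void emit_imm32(int32_t value, bool needs_reloc) {
    if (needs_reloc) reloc_positions.push_back(buf.size());  // RecordRelocInfo
    for (int i = 0; i < 4; ++i) emit8(static_cast<uint8_t>(value >> (8 * i)));
  }
};

int main() {
  ToyAssembler a;
  a.emit8(0x05);               // add eax, imm32 opcode
  a.emit_imm32(0x1000, true);  // immediate that carries reloc info
  printf("imm32 recorded at offset %zu\n", a.reloc_positions[0]);  // 1
  return 0;
}
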
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 3163783..419ee0f 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -185,16 +185,9 @@
__ j(greater_equal, &loop);
// Call the function.
- if (is_api_function) {
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -464,6 +457,146 @@
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the value to pass to the generator
+ // -- rbx : the JSGeneratorObject to resume
+ // -- rdx : the resume mode (tagged)
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(rbx);
+
+ // Store input value into generator object.
+ __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset), rax);
+ __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rax, rcx,
+ kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
+
+ // Load suspended function and context.
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(masm->isolate());
+ Operand step_in_enabled_operand = masm->ExternalOperand(step_in_enabled);
+ __ cmpb(step_in_enabled_operand, Immediate(0));
+ __ j(equal, &skip_flooding);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ bind(&skip_flooding);
+
+ // Pop return address.
+ __ PopReturnAddressTo(rax);
+
+ // Push receiver.
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+
+ // ----------- S t a t e -------------
+ // -- rax : return address
+ // -- rbx : the JSGeneratorObject to resume
+ // -- rdx : the resume mode (tagged)
+ // -- rdi : generator function
+ // -- rsi : generator context
+ // -- rsp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ subl(rcx, Immediate(1));
+ __ j(carry, &done_loop, Label::kNear);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+ __ j(not_equal, &old_generator);
+
+ // New-style (ignition/turbofan) generator object.
+ {
+ __ PushReturnAddressFrom(rax);
+ __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ movp(rdx, rbx);
+ __ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Old-style (full-codegen) generator object.
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushReturnAddressFrom(rax); // Return address.
+ __ Push(rbp); // Caller's frame pointer.
+ __ Move(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
+
+ // Restore the operand stack.
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
+ {
+ Label done_loop, loop;
+ __ Set(rcx, 0);
+ __ bind(&loop);
+ __ cmpl(rcx, rax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ Push(
+ FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ addl(rcx, Immediate(1));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // Restore context.
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+
+ // Resume the generator function at the continuation.
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ SmiToInteger64(
+ rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
+ __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
+ __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ movp(rax, rbx); // Continuation expects generator object in rax.
+ __ jmp(rdx);
+ }
+}
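
One detail in the hole-pushing loop above: subl sets the carry flag on
unsigned borrow, so jumping on carry right after subl(rcx, 1) runs the body
exactly rcx times and copes with a zero formal parameter count without
underflowing. The same control flow in C++:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t rcx = 3;  // formal parameter count; works for 0 as well
  int pushed = 0;
  for (;;) {
    uint32_t before = rcx--;  // subl(rcx, Immediate(1))
    if (before == 0) break;   // j(carry, &done_loop): borrow occurred
    ++pushed;                 // PushRoot(Heap::kTheHoleValueRootIndex)
  }
  printf("%d holes pushed\n", pushed);  // 3
  return 0;
}
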
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -480,6 +613,8 @@
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -490,10 +625,9 @@
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into edi (InterpreterBytecodeRegister).
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-
Label load_debug_bytecode_array, bytecode_array_loaded;
DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
__ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
@@ -503,18 +637,26 @@
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check function data field is actually a BytecodeArray object.
+ Label bytecode_array_not_present;
+ __ CompareRoot(kInterpreterBytecodeArrayRegister,
+ Heap::kUndefinedValueRootIndex);
+ __ j(equal, &bytecode_array_not_present);
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rax);
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push bytecode array.
+ // Load initial bytecode offset.
+ __ movp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push bytecode array and Smi tagged bytecode offset.
__ Push(kInterpreterBytecodeArrayRegister);
- // Push zero for bytecode array offset.
- __ Push(Immediate(0));
+ __ Integer32ToSmi(rcx, kInterpreterBytecodeOffsetRegister);
+ __ Push(rcx);
// Allocate the local and temporary register file on the stack.
{
@@ -545,19 +687,8 @@
__ j(greater_equal, &loop_header, Label::kNear);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ movp(kInterpreterRegisterFileRegister, rbp);
- __ addp(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ movp(kInterpreterBytecodeOffsetRegister,
- Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
@@ -567,13 +698,23 @@
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rbx);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in rax.
+
+ // Get the arguments + receiver count.
+ __ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ movl(rbx, FieldOperand(rbx, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+
+ // Drop receiver + arguments and return.
+ __ PopReturnAddressTo(rcx);
+ __ addp(rsp, rbx);
+ __ PushReturnAddressFrom(rcx);
+ __ ret(0);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
@@ -582,31 +723,20 @@
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
__ jmp(&bytecode_array_loaded);
+
+ // If the bytecode array is no longer present, then the underlying function
+ // has been switched to a different kind of code and we heal the closure by
+ // switching the code entry field over to the new code object as well.
+ __ bind(&bytecode_array_not_present);
+ __ leave(); // Leave the frame so we can tail call.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
+ __ RecordWriteCodeEntryField(rdi, rcx, r15);
+ __ jmp(rcx);
}
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
-
- // The return value is in accumulator, which is already in rax.
-
- // Leave the frame (also dropping the register file).
- __ leave();
-
- // Drop receiver + arguments and return.
- __ movl(rbx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ PopReturnAddressTo(rcx);
- __ addp(rsp, rbx);
- __ PushReturnAddressFrom(rcx);
- __ ret(0);
-}
-
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
bool push_receiver) {
// ----------- S t a t e -------------
@@ -637,7 +767,6 @@
__ j(greater, &loop_header, Label::kNear);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -661,7 +790,6 @@
RelocInfo::CODE_TARGET);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -689,26 +817,25 @@
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ Move(rbx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ Push(rbx);
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ movp(kInterpreterRegisterFileRegister, rbp);
- __ addp(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize dispatch table register.
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- // Get the context from the frame.
- __ movp(kContextRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ movp(
- kInterpreterBytecodeArrayRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ movp(kInterpreterBytecodeArrayRegister,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -719,10 +846,8 @@
}
// Get the target bytecode offset from the frame.
- __ movp(
- kInterpreterBytecodeOffsetRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ movp(kInterpreterBytecodeOffsetRegister,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
@@ -731,66 +856,144 @@
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
- __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rbx);
}
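
The return address pushed above is computed from the tagged Code pointer of
the entry trampoline: Code::kHeaderSize - kHeapObjectTag converts the tagged
object pointer into its first instruction's address, and the pc offset
recorded via SetInterpreterEntryReturnPCOffset lands just after the call(rbx)
in the entry trampoline. The arithmetic, with made-up constants:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kHeapObjectTag = 1;
  const uint64_t kCodeHeaderSize = 0x60;            // illustrative only
  uint64_t tagged_code = 0x20000 + kHeapObjectTag;  // trampoline Code object
  uint64_t pc_offset = 0x54;                        // recorded return offset

  uint64_t instruction_start = tagged_code + kCodeHeaderSize - kHeapObjectTag;
  uint64_t return_pc = instruction_start + pc_offset;

  assert(return_pc == 0x20000 + kCodeHeaderSize + pc_offset);
  return 0;
}
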
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and pop the
- // accumulator value into the accumulator register and push PC at top
- // of stack (to simulate initial call to bytecode handler in interpreter entry
- // trampoline).
- __ Pop(rbx);
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
- __ Push(rbx);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+ // First look up code; maybe we don't need to compile!
+ Label gotta_call_runtime;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = rdi;
+ Register map = r8;
+ Register index = r9;
+ __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
+ __ cmpl(index, Immediate(2));
+ __ j(less, &gotta_call_runtime);
+
+ // Find literals.
+ // r14 : native context
+ // r9 : length / index
+ // r8 : optimized code map
+ // rdx : new target
+ // rdi : closure
+ Register native_context = r14;
+ __ movp(native_context, NativeContextOperand());
+
+ __ bind(&loop_top);
+ // Native context match?
+ Register temp = r11;
+ __ movp(temp, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ cmpp(temp, native_context);
+ __ j(not_equal, &loop_bottom);
+ // OSR id set to none?
+ __ movp(temp, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ __ SmiToInteger32(temp, temp);
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmpl(temp, Immediate(bailout_id));
+ __ j(not_equal, &loop_bottom);
+ // Literals available?
+ __ movp(temp, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
+ __ movp(r15, index);
+ __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Code available?
+ Register entry = rcx;
+ __ movp(entry, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, r15);
+
+ // Link the closure into the optimized function list.
+ // rcx : code entry (entry)
+ // r14 : native context
+ // rdx : new target
+ // rdi : closure
+ __ movp(rbx,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ movp(rbx, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
+ kDontSaveFPRegs);
+ __ movp(closure, rbx);
+ __ jmp(entry);
+
+ __ bind(&loop_bottom);
+ __ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
+ __ cmpl(index, Immediate(1));
+ __ j(greater, &loop_top);
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+
+ // Last possibility. Check the context free optimized code map entry.
+ __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ // Is the full code valid?
+ __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
+ __ andl(rbx, Immediate(Code::KindField::kMask));
+ __ shrl(rbx, Immediate(Code::KindField::kShift));
+ __ cmpl(rbx, Immediate(Code::BUILTIN));
+ __ j(equal, &gotta_call_runtime);
+ // Yes, install the full code.
+ __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, r15);
+ __ jmp(entry);
+
+ __ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
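
The control flow of the lookup above, restated as plain C++ (the entry layout
and field names here are simplified assumptions, not V8's actual optimized
code map format):

#include <cstddef>

struct Entry {
  const void* native_context;  // weak cell value in the real map
  int osr_ast_id;              // BailoutId::None() marks a non-OSR entry
  const void* literals;        // cleared weak cells read as null here
  const void* code;
};

enum class Outcome { kInstallCode, kTrySharedCode, kCallRuntime };

Outcome Lookup(const Entry* entries, size_t count, const void* native_context,
               int none_id, const Entry** out) {
  for (size_t i = count; i-- > 0;) {  // walk the map from the end (loop_top)
    const Entry& e = entries[i];
    if (e.native_context != native_context) continue;  // loop_bottom
    if (e.osr_ast_id != none_id) continue;             // loop_bottom
    if (e.literals == nullptr) return Outcome::kCallRuntime;
    if (e.code == nullptr) return Outcome::kTrySharedCode;  // maybe_call_runtime
    *out = &e;
    return Outcome::kInstallCode;  // install_optimized_code_and_tailcall
  }
  return Outcome::kCallRuntime;  // gotta_call_runtime
}

int main() {
  const void* ctx = &ctx;
  Entry entries[] = {{ctx, 0, &entries[0], &entries[0]}};
  const Entry* found = nullptr;
  return Lookup(entries, 1, ctx, 0, &found) == Outcome::kInstallCode ? 0 : 1;
}
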
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
@@ -929,13 +1132,16 @@
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister,
+ Immediate(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpp(kScratchRegister,
+ Immediate(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -1010,29 +1216,6 @@
}
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : first argument (left-hand side)
- // -- rsp[16] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ movp(InstanceOfDescriptor::LeftRegister(),
- Operand(rbp, 2 * kPointerSize)); // Load left-hand side.
- __ movp(InstanceOfDescriptor::RightRegister(),
- Operand(rbp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ ret(2 * kPointerSize);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1789,6 +1972,34 @@
__ PushReturnAddressFrom(rcx);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : requested object size (untagged)
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Integer32ToSmi(rdx, rdx);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ Move(rsi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : requested object size (untagged)
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Integer32ToSmi(rdx, rdx);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ PushReturnAddressFrom(rcx);
+ __ Move(rsi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index e737801..602d3a0 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -53,12 +53,6 @@
}
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
void ArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -71,11 +65,6 @@
}
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -2013,125 +2002,6 @@
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = rdx; // Object (lhs).
- Register const function = rax; // Function (rhs).
- Register const object_map = rcx; // Map of {object}.
- Register const function_map = r8; // Map of {function}.
- Register const function_prototype = rdi; // Prototype of {function}.
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ ret(0);
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ movp(function_prototype,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- Register const function_prototype_map = kScratchRegister;
- __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
- __ j(not_equal, &function_prototype_valid, Label::kNear);
- __ movp(function_prototype,
- FieldOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ bind(&loop);
-
- __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &fast_runtime_fallback, Label::kNear);
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- __ j(equal, &fast_runtime_fallback, Label::kNear);
-
- __ movp(object, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmpp(object, function_prototype);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(object, Heap::kNullValueRootIndex);
- __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ j(not_equal, &loop);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // Found Proxy or access check needed: Call the runtime.
- __ bind(&fast_runtime_fallback);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(object);
- __ Push(function_prototype);
- __ PushReturnAddressFrom(kScratchRegister);
- // Invalidate the instanceof cache.
- __ Move(rax, Smi::FromInt(0));
- __ StoreRoot(rax, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(object);
- __ Push(function);
- __ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -3735,8 +3605,8 @@
__ bind(¬_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
+ Code::Flags code_flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::LOAD_IC, code_flags, receiver, name, feedback, no_reg);
@@ -3877,8 +3747,8 @@
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
+ Code::Flags code_flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
receiver, key, feedback, no_reg);
@@ -4449,15 +4319,15 @@
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ movp(Operand(rax, JSObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kMapOffset), rcx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(Operand(rax, JSObject::kPropertiesOffset), rbx);
- __ movp(Operand(rax, JSObject::kElementsOffset), rbx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ leap(rbx, Operand(rax, JSObject::kHeaderSize));
+ __ leap(rbx, FieldOperand(rax, JSObject::kHeaderSize));
// ----------- S t a t e -------------
- // -- rax : result (untagged)
+ // -- rax : result (tagged)
// -- rbx : result fields (untagged)
// -- rdi : result end (untagged)
// -- rcx : initial map
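
The switch from Operand to FieldOperand works because FieldOperand folds the
-1 heap-object tag into the displacement, so the stub can keep the tagged
pointer in rax the whole time instead of adding the tag at the end. The
equivalence, with made-up numbers:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t kHeapObjectTag = 1;
  int64_t untagged = 0x4000;  // raw allocation start
  int64_t tagged = untagged + kHeapObjectTag;
  int64_t kMapOffset = 0;     // offset of the first field

  // Operand(rax, offset) on an untagged pointer and
  // FieldOperand(rax, offset), i.e. Operand(rax, offset - kHeapObjectTag),
  // on the tagged pointer address the same byte:
  assert(untagged + kMapOffset == tagged + kMapOffset - kHeapObjectTag);
  return 0;
}
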
@@ -4475,10 +4345,6 @@
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(rbx, rdi, r11);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ incp(rax);
__ Ret();
}
__ bind(&slack_tracking);
@@ -4498,10 +4364,6 @@
__ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(rdx, rdi, r11);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ incp(rax);
-
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4532,10 +4394,10 @@
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(rcx);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ decp(rax);
__ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
__ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ decp(rdi); // Remove the tag from the end address.
__ jmp(&done_allocate);
// Fall back to %NewObject.
@@ -4557,19 +4419,19 @@
// -----------------------------------
__ AssertFunction(rdi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make rdx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ movp(rdx, rbp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make rdx point to the JavaScript frame.
+ __ movp(rdx, rbp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
@@ -4601,7 +4463,7 @@
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in rax.
@@ -4642,7 +4504,7 @@
Label allocate, done_allocate;
__ leal(rcx, Operand(rax, times_pointer_size,
JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(rcx, rdx, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Compute the arguments.length in rdi.
@@ -4709,11 +4571,26 @@
// -----------------------------------
__ AssertFunction(rdi);
+ // Make r9 point to the JavaScript frame.
+ __ movp(r9, rbp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ movp(r9, Operand(r9, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmpp(rdi, Operand(r9, StandardFrameConstants::kFunctionOffset));
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadSharedFunctionInfoSpecialField(
rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ leap(rdx, Operand(rbp, rcx, times_pointer_size,
+ __ leap(rdx, Operand(r9, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ Integer32ToSmi(rcx, rcx);
@@ -4721,6 +4598,7 @@
// rdx : parameters pointer
// rdi : function
// rsp[0] : return address
+ // r9 : JavaScript frame pointer.
// Registers used over the whole function:
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
@@ -4731,7 +4609,7 @@
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(r9, StandardFrameConstants::kCallerFPOffset));
__ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
@@ -4774,7 +4652,7 @@
__ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
+ __ Allocate(r8, rax, r9, no_reg, &runtime, NO_ALLOCATION_FLAGS);
// rax = address of new object(s) (tagged)
// r11 = argument count (untagged)
@@ -4927,19 +4805,19 @@
// -----------------------------------
__ AssertFunction(rdi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make rdx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ movp(rdx, rbp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make rdx point to the JavaScript frame.
+ __ movp(rdx, rbp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -4978,7 +4856,7 @@
Label allocate, done_allocate;
__ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
FixedArray::kHeaderSize));
- __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(rcx, rdx, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Compute the arguments.length in rdi.
@@ -5404,10 +5282,14 @@
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
__ PopReturnAddressTo(return_address);
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+
// context save
__ Push(context);
@@ -5441,7 +5323,7 @@
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
PrepareCallApiFunction(masm, kApiStackSpace);
@@ -5453,8 +5335,6 @@
__ movp(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.
__ Set(StackSpaceOperand(2), argc);
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
@@ -5479,11 +5359,11 @@
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
FCA::kArgsLength - FCA::kContextSaveIndex);
- Operand is_construct_call_operand = StackSpaceOperand(3);
+ Operand length_operand = StackSpaceOperand(2);
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
this->is_store() ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
int stack_space = 0;
- Operand* stack_space_operand = &is_construct_call_operand;
+ Operand* stack_space_operand = &length_operand;
stack_space = argc + FCA::kArgsLength + 1;
stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
@@ -5493,14 +5373,6 @@
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16 .. (16 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- r8 : api_function_address
- // -----------------------------------
-
#if defined(__MINGW64__) || defined(_WIN64)
Register getter_arg = r8;
Register accessor_info_arg = rdx;
@@ -5510,9 +5382,36 @@
Register accessor_info_arg = rsi;
Register name_arg = rdi;
#endif
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(r8));
+ Register api_function_address = r8;
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
Register scratch = rax;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+ // property name below the exit frame to make the GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ // Insert additional parameters into the stack frame above return address.
+ __ PopReturnAddressTo(scratch);
+ __ Push(receiver);
+ __ Push(FieldOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
+ __ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ Push(holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
+ __ PushReturnAddressFrom(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
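
The pushes above build v8::PropertyCallbackInfo::args_ bottom-up: entries go
on the stack from kThisIndex down to kShouldThrowOnErrorIndex so that each
value lands at the index the STATIC_ASSERTs pin down, with the name handle
pushed last. The ordering, spelled out:

#include <cstdio>

enum : int {
  kShouldThrowOnErrorIndex = 0,
  kHolderIndex = 1,
  kIsolateIndex = 2,
  kReturnValueDefaultValueIndex = 3,
  kReturnValueOffset = 4,
  kDataIndex = 5,
  kThisIndex = 6,
  kArgsLength = 7,
};

int main() {
  const char* names[kArgsLength] = {
      "should_throw_on_error (Smi 0)", "holder",
      "isolate",                       "return_value_default (undefined)",
      "return_value (undefined)",      "data",
      "receiver (this)"};
  for (int i = kArgsLength - 1; i >= 0; --i) printf("push %s\n", names[i]);
  printf("push name\n");  // name handle, just below the return address
  return 0;
}
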
@@ -5539,8 +5438,11 @@
// It's okay if api_function_address == getter_arg
// but not accessor_info_arg or name_arg
- DCHECK(!api_function_address.is(accessor_info_arg) &&
- !api_function_address.is(name_arg));
+ DCHECK(!api_function_address.is(accessor_info_arg));
+ DCHECK(!api_function_address.is(name_arg));
+ __ movp(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ movp(api_function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
@@ -5550,7 +5452,6 @@
NULL);
}
-
#undef __
} // namespace internal
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 33e987e..114cbdc 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -204,7 +204,7 @@
// Allocate new backing store.
__ bind(&new_backing_store);
__ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
- __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
+ __ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
@@ -296,7 +296,7 @@
// r8 : source FixedDoubleArray
// r9 : number of elements
__ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+ __ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index a9532dc..7126b89 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -8,6 +8,7 @@
#if V8_TARGET_ARCH_X64
+#include "src/base/compiler-specific.h"
#include "src/base/lazy-instance.h"
#include "src/disasm.h"
@@ -359,7 +360,7 @@
bool vex_128() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
- return (checked & 4) != 1;
+ return (checked & 4) == 0;
}
bool vex_none() {
@@ -479,7 +480,7 @@
int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
int AVXInstruction(byte* data);
- void AppendToBuffer(const char* format, ...);
+ PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
@@ -618,7 +619,7 @@
value = 0; // Initialize variables on all paths to satisfy the compiler.
count = 0;
}
- AppendToBuffer("%" V8_PTR_PREFIX "x", value);
+ AppendToBuffer("%" PRIx64, value);
return count;
}
@@ -1999,7 +2000,7 @@
if (rex_w()) AppendToBuffer("REX.W ");
AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
} else {
- AppendToBuffer("%s", idesc.mnem, operand_size_code());
+ AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
}
data++;
break;
@@ -2141,9 +2142,11 @@
default:
mnem = "???";
}
- AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
- mnem,
- operand_size_code());
+ if (regop <= 1) {
+ AppendToBuffer("%s%c ", mnem, operand_size_code());
+ } else {
+ AppendToBuffer("%s ", mnem);
+ }
data += PrintRightOperand(data);
}
break;
@@ -2334,9 +2337,7 @@
default:
UNREACHABLE();
}
- AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
- operand_size_code(),
- value);
+ AppendToBuffer("test%c rax,0x%" PRIx64, operand_size_code(), value);
break;
}
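
The PRIx64 and "%s%c" fixes in this file line up with the new PRINTF_FORMAT
annotation on AppendToBuffer, which lets the compiler type-check the varargs
(a plain "%s" with an extra argument, as above, is exactly what such checking
flags). Portable 64-bit formatting for comparison:

#include <cinttypes>
#include <cstdio>

int main() {
  uint64_t value = 0xdeadbeefULL;
  // PRIx64 picks the right length modifier regardless of how the platform
  // defines uint64_t (long vs long long), unlike the homegrown V8_PTR_PREFIX.
  printf("test%c rax,0x%" PRIx64 "\n", 'q', value);
  return 0;
}
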
case 0xD1: // fall through
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index b10b522..e1e7f9c 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -46,16 +46,11 @@
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
-const Register InstanceOfDescriptor::LeftRegister() { return rdx; }
-const Register InstanceOfDescriptor::RightRegister() { return rax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return rdx; }
const Register StringCompareDescriptor::RightRegister() { return rax; }
-
-const Register ApiGetterDescriptor::function_address() { return r8; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
const Register MathPowTaggedDescriptor::exponent() { return rdx; }
@@ -68,6 +63,8 @@
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
+const Register HasPropertyDescriptor::ObjectRegister() { return rax; }
+const Register HasPropertyDescriptor::KeyRegister() { return rbx; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -243,13 +240,16 @@
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // register state
+ // rax -- number of arguments
+ // rdi -- function
+ // rbx -- allocation site with elements kind
+ Register registers[] = {rdi, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -313,6 +313,11 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -373,9 +378,8 @@
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -410,6 +414,16 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rax, // the value to pass to the generator
+ rbx, // the JSGeneratorObject to resume
+ rdx // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 566091d..2efb529 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -489,7 +489,7 @@
// easier.
DCHECK(js_function.is(rdi));
DCHECK(code_entry.is(rcx));
- DCHECK(scratch.is(rax));
+ DCHECK(scratch.is(r15));
// Since a code entry (value) is always in old space, we don't need to update
the remembered set. If incremental marking is off, there is nothing for us to
@@ -537,13 +537,13 @@
DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
movp(arg_reg_1, js_function); // rcx gets rdi.
- movp(arg_reg_2, dst); // rdx gets rax.
+ movp(arg_reg_2, dst); // rdx gets r15.
} else {
// AMD64 calling convention.
DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
// rdi is already loaded with js_function.
- movp(arg_reg_2, dst); // rsi gets rax.
+ movp(arg_reg_2, dst); // rsi gets r15.
}
Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
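For context on the scratch-register change from rax to r15 in this function: the code marshals C call arguments under two ABIs, and the updated comments simply track the new scratch register through that shuffle. A reference note (facts about the conventions, not about this patch):

    // x64 integer argument registers by C calling convention:
    //   Windows x64:     arg1 = rcx, arg2 = rdx, arg3 = r8
    //   System V AMD64:  arg1 = rdi, arg2 = rsi, arg3 = rdx
    // So arg_reg_2 is rdx on Windows and rsi elsewhere, which is why the
    // comments now read "rdx gets r15" and "rsi gets r15".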
@@ -1116,15 +1116,6 @@
}
}
-void MacroAssembler::Set(Register dst, int64_t x, RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::WASM_MEMORY_REFERENCE) {
- DCHECK(x != 0);
- movq(dst, x, rmode);
- } else {
- DCHECK(RelocInfo::IsNone(rmode));
- }
-}
-
void MacroAssembler::Set(const Operand& dst, intptr_t x) {
if (kPointerSize == kInt64Size) {
if (is_int32(x)) {
@@ -3970,6 +3961,16 @@
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Push(object);
+ CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAGeneratorObject);
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
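AssertGeneratorObject above leans on V8's pointer tagging: a Smi (small integer) has a clear low bit, while a heap object pointer has the low bit set, so the testb against kSmiTagMask separates the two before the more expensive map check. A minimal sketch of the tagging predicate, using the constant values V8 defines:

    #include <cstdint>

    // V8 tagging constants (as defined in the V8 sources).
    constexpr intptr_t kSmiTag = 0;
    constexpr intptr_t kHeapObjectTag = 1;
    constexpr intptr_t kSmiTagMask = 1;

    inline bool IsSmi(intptr_t tagged_value) {
      // Low bit clear: Smi. Low bit set: heap object pointer.
      return (tagged_value & kSmiTagMask) == kSmiTag;
    }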
@@ -4829,7 +4830,7 @@
Label aligned;
testl(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
+ if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
cmpp(result, ExternalOperand(allocation_limit));
@@ -4872,6 +4873,7 @@
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4905,23 +4907,19 @@
movp(top_reg, result);
}
addp(top_reg, Immediate(object_size));
- j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpp(top_reg, limit_operand);
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ }
- bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if (tag_result) {
- subp(result, Immediate(object_size - kHeapObjectTag));
- } else {
- subp(result, Immediate(object_size));
- }
- } else if (tag_result) {
- // Tag the result if requested.
+ subp(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ // Tag the result.
DCHECK(kHeapObjectTag == 1);
incp(result);
}
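This hunk removes the carry bailout after addp and drops the untagged result path: with TAG_OBJECT gone, Allocate() always returns a tagged pointer, and the TAG_OBJECT to NO_ALLOCATION_FLAGS substitutions in the callers below follow from that. When top_reg aliases result, one subtraction both rewinds the bump pointer and applies the tag, since top - (object_size - kHeapObjectTag) == (top - object_size) + kHeapObjectTag. A sketch of that identity, assuming kHeapObjectTag == 1 as the DCHECK asserts:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;

    // new_top points one past the freshly reserved object; the object itself
    // starts at new_top - object_size, and tagging adds kHeapObjectTag.
    inline intptr_t TaggedObjectStart(intptr_t new_top, intptr_t object_size) {
      return new_top - (object_size - kHeapObjectTag);
    }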
@@ -4937,6 +4935,8 @@
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4949,6 +4949,7 @@
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4971,34 +4972,66 @@
MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
- // Calculate new top and bail out if new space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
addp(result_end, result);
- j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpp(result_end, limit_operand);
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addp(result, Immediate(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
+
+ // Tag the result.
+ addp(result, Immediate(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
+ }
+
+ leap(result_end, Operand(result, object_size));
+
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ addp(result, Immediate(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
+ }
+
+ leap(result_end, Operand(result, object_size, times_1, 0));
+
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ addp(result, Immediate(kHeapObjectTag));
+}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
Label* gc_required,
MutableMode mode) {
// Allocate heap number in new space.
- Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
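The new FastAllocate overloads implement the folded half of allocation folding: a dominating allocation performs the limit check for the combined size of a whole group (the ALLOCATION_FOLDING_DOMINATOR branches above intentionally skip the top-pointer update), after which each individual allocation just bumps the top pointer with no check. A schematic of the scheme in plain C++, illustrative only and not V8's actual heap code:

    #include <cstddef>

    struct NewSpace {
      char* top;
      char* limit;  // invariant: top <= limit
    };

    // Dominator: one limit check covers the whole folded group. The top
    // pointer is deliberately left untouched.
    char* CheckFoldedGroupFits(NewSpace* space, size_t group_size) {
      if (static_cast<size_t>(space->limit - space->top) < group_size) {
        return nullptr;  // caller jumps to gc_required
      }
      return space->top;
    }

    // Folded allocation: a bare bump of the top pointer. Safe only because
    // the dominator already proved the whole group fits.
    char* FastAllocate(NewSpace* space, size_t object_size) {
      char* result = space->top;
      space->top += object_size;
      return result;
    }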
@@ -5030,14 +5063,8 @@
}
// Allocate two-byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
+ scratch3, gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
@@ -5066,14 +5093,8 @@
}
// Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
+ scratch3, gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
@@ -5091,7 +5112,7 @@
Label* gc_required) {
// Allocate cons string in new space.
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
@@ -5103,12 +5124,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
@@ -5122,7 +5139,7 @@
Label* gc_required) {
// Allocate sliced string in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
@@ -5136,7 +5153,7 @@
Label* gc_required) {
// Allocate sliced string in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
@@ -5152,7 +5169,8 @@
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index af3dd03..013d0f1 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -21,8 +21,8 @@
const Register kReturnRegister2 = {Register::kCode_r8};
const Register kJSFunctionRegister = {Register::kCode_rdi};
const Register kContextRegister = {Register::kCode_rsi};
+const Register kAllocateSizeRegister = {Register::kCode_rdx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r11};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
@@ -818,7 +818,6 @@
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(Register dst, int64_t x, RelocInfo::Mode rmode);
void Set(const Operand& dst, intptr_t x);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
@@ -1224,6 +1223,10 @@
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1304,6 +1307,15 @@
Label* gc_required,
AllocationFlags flags);
+ // FastAllocate is currently only used for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
// Allocate a heap number in new space with undefined value. Returns
// tagged pointer in result register, or jumps to gc_required if new
// space is full.