Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 802c80f..ba2a864 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -81,11 +81,6 @@
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -120,21 +115,6 @@
}
}
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Memory::Address_at(pc_) = updated_reference;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
- }
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(pc_);
@@ -284,7 +264,7 @@
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index e74d770..5cc783c 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -101,6 +101,42 @@
return false;
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+ if (IsWasmMemoryReference(rmode_)) {
+ Address updated_reference;
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Memory::Address_at(pc_) = updated_reference;
+ } else if (IsWasmMemorySizeReference(rmode_)) {
+ uint32_t updated_size_reference;
+ DCHECK(wasm_memory_size_reference() <= old_size);
+ updated_size_reference =
+ new_size + (wasm_memory_size_reference() - old_size);
+ DCHECK(updated_size_reference <= new_size);
+ Memory::uint32_at(pc_) = updated_size_reference;
+ } else {
+ UNREACHABLE();
+ }
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+ }
+}
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -552,6 +588,18 @@
emit_operand(dst, src);
}
+void Assembler::xchg_b(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x86);
+ emit_operand(reg, op);
+}
+
+void Assembler::xchg_w(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x87);
+ emit_operand(reg, op);
+}
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 96eced9..eaf28e9 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -74,6 +74,8 @@
V(stX_6) \
V(stX_7)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(stX_0) \
V(stX_1) \
@@ -145,8 +147,7 @@
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
-
-struct DoubleRegister {
+struct X87Register {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -158,8 +159,8 @@
static const int kMaxNumRegisters = Code::kAfterLast;
static const int kMaxNumAllocatableRegisters = 6;
- static DoubleRegister from_code(int code) {
- DoubleRegister result = {code};
+ static X87Register from_code(int code) {
+ X87Register result = {code};
return result;
}
@@ -171,24 +172,26 @@
return reg_code;
}
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(X87Register reg) const { return reg_code == reg.reg_code; }
const char* ToString();
int reg_code;
};
+typedef X87Register FloatRegister;
+
+typedef X87Register DoubleRegister;
+
+// TODO(x87) Define SIMD registers.
+typedef X87Register Simd128Register;
+
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef DoubleRegister X87Register;
-
-// TODO(x87) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -648,6 +651,8 @@
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, const Operand& src);
+ void xchg_b(Register reg, const Operand& op);
+ void xchg_w(Register reg, const Operand& op);
// Arithmetics
void adc(Register dst, int32_t imm32);
@@ -958,7 +963,7 @@
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(const int reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index 9e13172..7018802 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -186,16 +186,9 @@
__ j(greater_equal, &loop);
// Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -392,6 +385,141 @@
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the value to pass to the generator
+ // -- ebx : the JSGeneratorObject to resume
+ // -- edx : the resume mode (tagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(ebx);
+
+ // Store input value into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), eax);
+ __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, eax, ecx,
+ kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+ // Load suspended function and context.
+ __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(masm->isolate());
+ __ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
+ __ j(equal, &skip_flooding);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edx);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(edx);
+ __ Pop(ebx);
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ bind(&skip_flooding);
+
+ // Pop return address.
+ __ PopReturnAddressTo(eax);
+
+ // Push receiver.
+ __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+ // ----------- S t a t e -------------
+ // -- eax : return address
+ // -- ebx : the JSGeneratorObject to resume
+ // -- edx : the resume mode (tagged)
+ // -- edi : generator function
+ // -- esi : generator context
+ // -- esp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ sub(ecx, Immediate(Smi::FromInt(1)));
+ __ j(carry, &done_loop, Label::kNear);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ j(not_equal, &old_generator);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ __ PushReturnAddressFrom(eax);
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax,
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ mov(edx, ebx);
+ __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushReturnAddressFrom(eax); // Return address.
+ __ Push(ebp); // Caller's frame pointer.
+ __ Move(ebp, esp);
+ __ Push(esi); // Callee's context.
+ __ Push(edi); // Callee's JS Function.
+
+ // Restore the operand stack.
+ __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+ {
+ Label done_loop, loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ j(equal, &done_loop, Label::kNear);
+ __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+ Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+ // Resume the generator function at the continuation.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(ecx);
+ __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ mov(eax, ebx); // Continuation expects generator object in eax.
+ __ jmp(edx);
+ }
+}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -408,6 +536,8 @@
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -418,10 +548,9 @@
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into edi (InterpreterBytecodeRegister).
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-
Label load_debug_bytecode_array, bytecode_array_loaded;
__ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
Immediate(DebugInfo::uninitialized()));
@@ -430,8 +559,12 @@
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check function data field is actually a BytecodeArray object.
+ Label bytecode_array_not_present;
+ __ CompareRoot(kInterpreterBytecodeArrayRegister,
+ Heap::kUndefinedValueRootIndex);
+ __ j(equal, &bytecode_array_not_present);
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
eax);
@@ -440,8 +573,8 @@
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
- // Push zero for bytecode array offset.
- __ push(Immediate(0));
+ // Push Smi tagged initial bytecode array offset.
+ __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
// Allocate the local and temporary register file on the stack.
{
@@ -474,41 +607,36 @@
__ j(greater_equal, &loop_header);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator, bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ mov(kInterpreterRegisterFileRegister, ebp);
- __ add(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Push dispatch table as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ push(ebx);
+ __ mov(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
- // Restore undefined_value in accumulator (eax)
- // TODO(rmcilroy): Remove this once we move the dispatch table back into a
- // register.
- __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+ times_pointer_size, 0));
__ call(ebx);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in eax.
+
+ // Get the arguments + receiver count.
+ __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(ebx, FieldOperand(ebx, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+
+ // Drop receiver + arguments and return.
+ __ pop(ecx);
+ __ add(esp, ebx);
+ __ push(ecx);
+ __ ret(0);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
@@ -517,31 +645,23 @@
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
__ jmp(&bytecode_array_loaded);
+
+ // If the bytecode array is no longer present, then the underlying function
+ // has been switched to a different kind of code and we heal the closure by
+ // switching the code entry field over to the new code object as well.
+ __ bind(&bytecode_array_not_present);
+ __ pop(edx); // Callee's new target.
+ __ pop(edi); // Callee's JS function.
+ __ pop(esi); // Callee's context.
+ __ leave(); // Leave the frame so we can tail call.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+ __ RecordWriteCodeEntryField(edi, ecx, ebx);
+ __ jmp(ecx);
}
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
-
- // The return value is in accumulator, which is already in rax.
-
- // Leave the frame (also dropping the register file).
- __ leave();
-
- // Drop receiver + arguments and return.
- __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ pop(ecx);
- __ add(esp, ebx);
- __ push(ecx);
- __ ret(0);
-}
-
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register array_limit) {
// ----------- S t a t e -------------
@@ -559,7 +679,6 @@
__ j(greater, &loop_header, Label::kNear);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -628,17 +747,26 @@
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ LoadHeapObject(ebx,
+ masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ push(ebx);
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register.
- __ mov(kInterpreterRegisterFileRegister, ebp);
- __ add(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize the dispatch table register.
+ __ mov(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the bytecode array pointer from the frame.
__ mov(kInterpreterBytecodeArrayRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -649,92 +777,173 @@
}
// Get the target bytecode offset from the frame.
- __ mov(
- kInterpreterBytecodeOffsetRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Push dispatch table as a stack located parameter to the bytecode handler.
- __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
- DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ Pop(esi);
- __ Push(ebx);
- __ Push(esi);
-
// Dispatch to the target bytecode.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
-
- // Get the context from the frame.
- __ mov(kContextRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+ times_pointer_size, 0));
__ jmp(ebx);
}
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register and push PC at top
- // of stack (to simulate initial call to bytecode handler in interpreter entry
- // trampoline).
- __ Pop(ebx);
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
- __ Push(ebx);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ // First lookup code, maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = edi;
+ Register new_target = edx;
+ Register argument_count = eax;
+
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = argument_count;
+ Register index = ebx;
+ __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+ __ cmp(index, Immediate(Smi::FromInt(2)));
+ __ j(less, &gotta_call_runtime);
+
+ // Find literals.
+ // edx : native context
+ // ebx : length / index
+ // eax : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = edx;
+ __ mov(native_context, NativeContextOperand());
+
+ __ bind(&loop_top);
+ Register temp = edi;
+
+ // Does the native context match?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ cmp(temp, native_context);
+ __ j(not_equal, &loop_bottom);
+ // OSR id set to none?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+ __ j(not_equal, &loop_bottom);
+ // Literals available?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ mov(ecx, Operand(esp, 0));
+ __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+ __ push(index);
+ __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = ecx;
+ __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, eax);
+
+ // Link the closure into the optimized function list.
+ // ecx : code entry
+ // edx : native context
+ // edi : closure
+ __ mov(ebx,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ mov(ebx, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+ kDontSaveFPRegs);
+ __ mov(closure, ebx);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ jmp(entry);
+
+ __ bind(&loop_bottom);
+ __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(index, Immediate(Smi::FromInt(1)));
+ __ j(greater, &loop_top);
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context free optimized code map entry.
+ __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+ __ and_(ebx, Code::KindField::kMask);
+ __ shr(ebx, Code::KindField::kShift);
+ __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ j(equal, &gotta_call_runtime_no_stack);
+ // Yes, install the full code.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, ebx);
+ __ jmp(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
+
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
@@ -876,13 +1085,14 @@
// Switch on the state.
Label not_no_registers, not_tos_eax;
- __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+ __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
__ j(not_equal, ¬_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(¬_no_registers);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
__ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, FullCodeGenerator::TOS_REG);
+ __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
__ j(not_equal, ¬_tos_eax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, eax.
@@ -958,29 +1168,6 @@
}
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : first argument (left-hand side)
- // -- esp[8] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(InstanceOfDescriptor::LeftRegister(),
- Operand(ebp, 2 * kPointerSize)); // Load left-hand side.
- __ mov(InstanceOfDescriptor::RightRegister(),
- Operand(ebp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ ret(2 * kPointerSize);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -2385,6 +2572,34 @@
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : requested object size (untagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ SmiTag(edx);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : requested object size (untagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ SmiTag(edx);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 71adfd3..fdb97ee 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -62,12 +62,6 @@
}
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
void ArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -80,11 +74,6 @@
}
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1781,129 +1770,6 @@
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = edx; // Object (lhs).
- Register const function = eax; // Function (rhs).
- Register const object_map = ecx; // Map of {object}.
- Register const function_map = ebx; // Map of {function}.
- Register const function_prototype = function_map; // Prototype of {function}.
- Register const scratch = edi;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
- __ LoadRoot(eax, Heap::kFalseValueRootIndex);
- __ ret(0);
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ mov(function_prototype,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- Register const function_prototype_map = scratch;
- __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
- __ j(not_equal, &function_prototype_valid, Label::kNear);
- __ mov(function_prototype,
- FieldOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Label done, loop, fast_runtime_fallback;
- __ mov(eax, isolate()->factory()->true_value());
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &fast_runtime_fallback, Label::kNear);
- // Check if the current object is a Proxy.
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- __ j(equal, &fast_runtime_fallback, Label::kNear);
-
- __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object, function_prototype);
- __ j(equal, &done, Label::kNear);
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ cmp(object, isolate()->factory()->null_value());
- __ j(not_equal, &loop);
- __ mov(eax, isolate()->factory()->false_value());
-
- __ bind(&done);
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // Found Proxy or access check needed: Call the runtime.
- __ bind(&fast_runtime_fallback);
- __ PopReturnAddressTo(scratch);
- __ Push(object);
- __ Push(function_prototype);
- __ PushReturnAddressFrom(scratch);
- // Invalidate the instanceof cache.
- __ Move(eax, Immediate(Smi::FromInt(0)));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ PopReturnAddressTo(scratch);
- __ Push(object);
- __ Push(function);
- __ PushReturnAddressFrom(scratch);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -3534,8 +3400,8 @@
__ j(not_equal, &miss);
__ push(slot);
__ push(vector);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
+ Code::Flags code_flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
receiver, name, vector, scratch);
__ pop(vector);
@@ -3795,8 +3661,8 @@
__ pop(value);
__ push(slot);
__ push(vector);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
+ Code::Flags code_flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
receiver, key, slot, no_reg);
__ pop(vector);
@@ -4386,16 +4252,16 @@
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ mov(Operand(eax, JSObject::kMapOffset), ecx);
- __ mov(Operand(eax, JSObject::kPropertiesOffset),
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
masm->isolate()->factory()->empty_fixed_array());
- __ mov(Operand(eax, JSObject::kElementsOffset),
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
masm->isolate()->factory()->empty_fixed_array());
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+ __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
// ----------- S t a t e -------------
- // -- eax : result (untagged)
+ // -- eax : result (tagged)
// -- ebx : result fields (untagged)
// -- edi : result end (untagged)
// -- ecx : initial map
@@ -4413,10 +4279,6 @@
// Initialize all in-object fields with undefined.
__ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
__ InitializeFieldsWithFiller(ebx, edi, edx);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ inc(eax);
__ Ret();
}
__ bind(&slack_tracking);
@@ -4439,10 +4301,6 @@
__ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(ebx, edx, edi);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ inc(eax);
-
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4473,10 +4331,10 @@
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(ecx);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ dec(eax);
__ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
__ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ dec(edi);
__ jmp(&done_allocate);
// Fall back to %NewObject.
@@ -4497,19 +4355,19 @@
// -----------------------------------
__ AssertFunction(edi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make edx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(edx, ebp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make edx point to the JavaScript frame.
+ __ mov(edx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
@@ -4539,7 +4397,7 @@
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in rax.
@@ -4581,7 +4439,7 @@
Label allocate, done_allocate;
__ lea(ecx, Operand(eax, times_half_pointer_size,
JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in edx.
@@ -4643,35 +4501,50 @@
// -----------------------------------
__ AssertFunction(edi);
+ // Make ecx point to the JavaScript frame.
+ __ mov(ecx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- // ecx : number of parameters (tagged)
+ // ebx : number of parameters (tagged)
// edx : parameters pointer
// edi : function
+ // ecx : JavaScript frame pointer.
// esp[0] : return address
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
- __ mov(ebx, ecx);
- __ push(ecx);
+ __ mov(ecx, ebx);
+ __ push(ebx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ mov(ebx, ecx);
- __ push(ecx);
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ push(ebx);
+ __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(edx,
Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
@@ -4705,7 +4578,7 @@
__ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
// eax = address of new object(s) (tagged)
// ecx = argument count (smi-tagged)
@@ -4883,19 +4756,19 @@
// -----------------------------------
__ AssertFunction(edi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make edx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(edx, ebp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make edx point to the JavaScript frame.
+ __ mov(edx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -4934,7 +4807,7 @@
__ lea(ecx,
Operand(eax, times_half_pointer_size,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in edx.
@@ -5354,9 +5227,14 @@
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
__ pop(return_address);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+
// context save.
__ push(context);
@@ -5401,7 +5279,7 @@
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
@@ -5412,8 +5290,6 @@
__ mov(ApiParameterOperand(3), scratch);
// FunctionCallbackInfo::length_.
__ Move(ApiParameterOperand(4), Immediate(argc()));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5433,8 +5309,8 @@
}
Operand return_value_operand(ebp, return_value_offset * kPointerSize);
int stack_space = 0;
- Operand is_construct_call_operand = ApiParameterOperand(5);
- Operand* stack_space_operand = &is_construct_call_operand;
+ Operand length_operand = ApiParameterOperand(4);
+ Operand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -5445,14 +5321,34 @@
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- edx : api_function_address
- // -----------------------------------
- DCHECK(edx.is(ApiGetterDescriptor::function_address()));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = ebx;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ __ pop(scratch); // Pop return address to extend the frame.
+ __ push(receiver);
+ __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex); // ReturnValue
+ // ReturnValue default value
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ push(Immediate(ExternalReference::isolate_address(isolate())));
+ __ push(holder);
+ __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
+ __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch); // Restore return address.
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5462,9 +5358,6 @@
// active) in non-GCed stack space.
const int kApiArgc = 3 + 1;
- Register api_function_address = edx;
- Register scratch = ebx;
-
// Load address of v8::PropertyAccessorInfo::args_ array.
__ lea(scratch, Operand(esp, 2 * kPointerSize));
@@ -5474,25 +5367,30 @@
Operand info_object = ApiParameterOperand(3);
__ mov(info_object, scratch);
+ // Name as handle.
__ sub(scratch, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(0), scratch); // name.
+ __ mov(ApiParameterOperand(0), scratch);
+ // Arguments pointer.
__ lea(scratch, info_object);
- __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+ __ mov(ApiParameterOperand(1), scratch);
// Reserve space for optional callback address parameter.
Operand thunk_last_arg = ApiParameterOperand(2);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ Register function_address = edx;
+ __ mov(function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- thunk_last_arg, kStackUnwindSpace, nullptr,
- return_value_operand, NULL);
+ CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
+ kStackUnwindSpace, nullptr, return_value_operand,
+ NULL);
}
-
#undef __
} // namespace internal
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index 776edeb..8112d11 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -275,8 +275,7 @@
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags =
- static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
__ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
@@ -391,7 +390,7 @@
// Allocate new FixedArray.
// ebx: length of source FixedDoubleArray (smi-tagged)
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+ __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
// eax: destination FixedArray
// ebx: number of elements
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index 91ce227..2a90df9 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -8,6 +8,7 @@
#if V8_TARGET_ARCH_X87
+#include "src/base/compiler-specific.h"
#include "src/disasm.h"
namespace disasm {
@@ -29,18 +30,19 @@
};
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
- {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER}, {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER}, {-1, "", UNSET_OP_ORDER}};
+ {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER}, {0x86, "xchg_b", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER}, {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER}, {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
@@ -325,8 +327,7 @@
int FPUInstruction(byte* data);
int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
+ PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
@@ -948,7 +949,7 @@
const InstructionDesc& idesc = instruction_table_->Get(*data);
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
+ AppendToBuffer("%s", idesc.mnem);
data++;
break;
@@ -1267,6 +1268,13 @@
} else if (*data == 0x8B) {
data++;
data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+ } else if (*data == 0x87) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xchg_w ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
} else if (*data == 0x89) {
data++;
int mod, regop, rm;
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index e41d42c..260d871 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -51,16 +51,11 @@
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-const Register InstanceOfDescriptor::LeftRegister() { return edx; }
-const Register InstanceOfDescriptor::RightRegister() { return eax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
-
-const Register ApiGetterDescriptor::function_address() { return edx; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register MathPowTaggedDescriptor::exponent() { return eax; }
@@ -73,6 +68,8 @@
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
+const Register HasPropertyDescriptor::ObjectRegister() { return eax; }
+const Register HasPropertyDescriptor::KeyRegister() { return ebx; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -250,13 +247,16 @@
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ Register registers[] = {edi, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -320,6 +320,11 @@
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -380,8 +385,8 @@
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -416,6 +421,16 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // the value to pass to the generator
+ ebx, // the JSGeneratorObject to resume
+ edx // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
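
Every descriptor hunk above follows the same pattern: the platform file pins a call interface's parameters to concrete registers, in parameter order, and hands the array to platform-independent code. A reduced model of that pattern (names are hypothetical stand-ins, not the real V8 classes):

#include <cstddef>
#include <vector>

struct RegisterSketch { int code; };  // stand-in for V8's Register
constexpr RegisterSketch eax{0}, ebx{3}, edx{2};

struct DescriptorDataSketch {
  // Models CallInterfaceDescriptorData::InitializePlatformSpecific:
  // records the parameter registers in call order.
  void InitializePlatformSpecific(size_t count, const RegisterSketch* regs) {
    registers_.assign(regs, regs + count);
  }
  std::vector<RegisterSketch> registers_;
};

void ResumeGeneratorSketch(DescriptorDataSketch* data) {
  static const RegisterSketch registers[] = {
      eax,  // the value to pass to the generator
      ebx,  // the JSGeneratorObject to resume
      edx,  // the resume mode (tagged)
  };
  data->InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                   registers);
}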
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index b46167d..3cee0ea 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -973,6 +973,17 @@
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Push(object);
+ CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAGeneratorObject);
+ }
+}
+
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
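
AssertGeneratorObject has the same shape as the neighboring assertions: a smi can never be a generator object, so the low tag bit is tested first, and only then is the instance type compared (CmpObjectType clobbers its map operand, hence the Push/Pop around it). The tag test, stated in plain C++ for the 32-bit smi encoding (sketch):

#include <cstdint>

// On 32-bit V8, a smi has its low bit clear and a heap object pointer has it
// set (kHeapObjectTag == 1, kSmiTagMask == 1).
constexpr uintptr_t kSmiTagMask = 1;

inline bool IsSmi(uintptr_t value) {
  // test(object, Immediate(kSmiTagMask)) sets ZF exactly when this is true,
  // so Check(not_equal, ...) aborts on smis.
  return (value & kSmiTagMask) == 0;
}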
@@ -1458,6 +1469,7 @@
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1499,26 +1511,23 @@
// Calculate new top and bail out if space is exhausted.
Register top_reg = result_end.is_valid() ? result_end : result;
+
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(top_reg, Immediate(object_size));
- j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ }
- // Tag result if requested.
- bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if (tag_result) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- sub(result, Immediate(object_size));
- }
- } else if (tag_result) {
+ sub(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ // Tag the result.
DCHECK(kHeapObjectTag == 1);
inc(result);
}
@@ -1535,6 +1544,8 @@
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1592,16 +1603,14 @@
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- if ((flags & TAG_OBJECT) != 0) {
- DCHECK(kHeapObjectTag == 1);
- inc(result);
- }
+ // Tag result.
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
-
void MacroAssembler::Allocate(Register object_size,
Register result,
Register result_end,
@@ -1609,6 +1618,7 @@
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1652,20 +1662,66 @@
mov(result_end, object_size);
}
add(result_end, result);
- j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- DCHECK(kHeapObjectTag == 1);
- inc(result);
- }
+ // Tag result.
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
+ }
}
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ lea(result_end, Operand(result, object_size));
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ lea(result_end, Operand(result, object_size, times_1, 0));
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
+}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
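
Taken together, the ALLOCATION_FOLDING_DOMINATOR checks and the new FastAllocate overloads implement one limit check for a whole group of allocations: the dominator compares top plus the combined size against the limit but deliberately leaves top untouched, and each folded allocation afterwards bumps top unconditionally. A schematic sketch of the two roles (hypothetical types, not V8 code):

#include <cstdint>

struct SpaceSketch {  // hypothetical stand-in for new-space top/limit
  uintptr_t top;
  uintptr_t limit;
};

// Dominator role: limit-checked for the combined size of the folded group,
// but `top` is deliberately not updated (see the flag checks above).
bool TryReserve(SpaceSketch* s, uintptr_t group_size, uintptr_t* result) {
  if (s->top + group_size > s->limit) return false;  // -> gc_required
  *result = s->top + 1;  // kHeapObjectTag == 1: tag the result
  return true;
}

// FastAllocate role: a plain bump of `top`, valid only because the dominator
// already proved the whole group fits.
uintptr_t FastAllocateSketch(SpaceSketch* s, uintptr_t object_size) {
  uintptr_t result = s->top;
  s->top += object_size;
  return result + 1;  // tag the result
}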
@@ -1674,7 +1730,7 @@
MutableMode mode) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
Handle<Map> map = mode == MUTABLE
? isolate()->factory()->mutable_heap_number_map()
@@ -1700,15 +1756,9 @@
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
+ REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1734,15 +1784,9 @@
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
+ REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1762,7 +1806,7 @@
// Allocate one-byte string in new space.
Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
- gc_required, TAG_OBJECT);
+ gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1780,7 +1824,7 @@
Label* gc_required) {
// Allocate heap number in new space.
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1792,12 +1836,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1811,7 +1851,7 @@
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1825,7 +1865,7 @@
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1841,7 +1881,8 @@
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch);
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 5571413..42b7eb1 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -19,10 +19,11 @@
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
+const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
@@ -499,6 +500,23 @@
j(not_zero, not_smi_label, distance);
}
+ // Jump if the value cannot be represented by a smi.
+ inline void JumpIfNotValidSmiValue(Register value, Register scratch,
+ Label* on_invalid,
+ Label::Distance distance = Label::kFar) {
+ mov(scratch, value);
+ add(scratch, Immediate(0x40000000U));
+ j(sign, on_invalid, distance);
+ }
+
+ // Jump if the unsigned integer value cannot be represented by a smi.
+ inline void JumpIfUIntNotValidSmiValue(
+ Register value, Label* on_invalid,
+ Label::Distance distance = Label::kFar) {
+ cmp(value, Immediate(0x40000000U));
+ j(above_equal, on_invalid, distance);
+ }
+
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
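
The constants in these helpers encode the 32-bit smi range: a value must fit in 31 bits, i.e. lie in [-2^30, 2^30). Adding 2^30 shifts that range onto [0, 2^31), so the sign flag of the sum is set exactly for out-of-range values, which is what j(sign, on_invalid) keys on; the unsigned variant is a plain compare against 2^30. A standalone check of both predicates (illustrative only):

#include <cassert>
#include <cstdint>

bool IsValidSmiValue(int32_t value) {
  // Matches JumpIfNotValidSmiValue: the sign of (value + 2^30) flags overflow.
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}

bool IsValidUIntSmiValue(uint32_t value) {
  // Matches JumpIfUIntNotValidSmiValue: unsigned compare against 2^30.
  return value < 0x40000000u;
}

int main() {
  assert(IsValidSmiValue(0x3FFFFFFF) && !IsValidSmiValue(0x40000000));
  assert(IsValidSmiValue(-0x40000000) && !IsValidSmiValue(-0x40000001));
  assert(IsValidUIntSmiValue(0x3FFFFFFFu) && !IsValidUIntSmiValue(0x40000000u));
  return 0;
}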
@@ -552,6 +570,10 @@
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -607,6 +629,14 @@
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently used only for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register result_end,
+ AllocationFlags flags);
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or