Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 9693a52..76d0640 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -603,16 +603,9 @@
// a0: number of arguments
// a1: constructor function
// a3: new target
- if (is_api_function) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -825,6 +818,139 @@
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a2 : the resume mode (tagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(a1);
+
+ // Store input value into generator object.
+ __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, v0, a3,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+ __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(masm->isolate());
+ __ li(t1, Operand(step_in_enabled));
+ __ lb(t1, MemOperand(t1));
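+ // A zero byte in the flag means stepping is disabled, so skip the flood.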
+ __ Branch(&skip_flooding, eq, t1, Operand(zero_reg));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a2, t0);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(a1, a2);
+ __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ }
+ __ bind(&skip_flooding);
+
+ // Push receiver.
+ __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(t1);
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a2 : the resume mode (tagged)
+ // -- t0 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
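+ // a3 holds the Smi-tagged formal parameter count; decrement it and push
+ // one hole per parameter until the count goes negative.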
+ __ bind(&loop);
+ __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+ // New-style (ignition/turbofan) generator object.
+ {
+ __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(a0);
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
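+ // a3 (new.target) <- generator object, a1 (target) <- generator function.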
+ __ Move(a3, a1);
+ __ Move(a1, t0);
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(a2);
+ }
+
+ // Old-style (full-codegen) generator object.
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(ra, fp);
+ __ Move(fp, sp);
+ __ Push(cp, t0);
+
+ // Restore the operand stack.
+ __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+ __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+ __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
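+ // The length in a3 is a Smi, so shifting by kPointerSizeLog2 - 1 both
+ // untags it and scales it to a byte offset for the end address.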
+ __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a0, Operand(a3));
+ __ lw(t1, MemOperand(a0));
+ __ Push(t1);
+ __ Branch(USE_DELAY_SLOT, &loop);
+ __ addiu(a0, a0, kPointerSize); // In delay slot.
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+ // Resume the generator function at the continuation.
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(a2);
+ __ Addu(a3, a3, Operand(a2));
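+ // Mark the generator as executing so it cannot be resumed again while
+ // it is already running.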
+ __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Move(v0, a1); // Continuation expects generator object in v0.
+ __ Jump(a3);
+ }
+}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -842,14 +968,16 @@
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
Register debug_info = kInterpreterBytecodeArrayRegister;
@@ -861,8 +989,11 @@
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check function data field is actually a BytecodeArray object.
+ Label bytecode_array_not_present;
+ __ JumpIfRoot(kInterpreterBytecodeArrayRegister,
+ Heap::kUndefinedValueRootIndex, &bytecode_array_not_present);
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, t0);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
Operand(zero_reg));
@@ -871,8 +1002,13 @@
Operand(BYTECODE_ARRAY_TYPE));
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
+ __ Push(a3, kInterpreterBytecodeArrayRegister, t0);
// Allocate the local and temporary register file on the stack.
{
@@ -902,17 +1038,8 @@
__ Branch(&loop_header, ge, t0, Operand(zero_reg));
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load bytecode offset and dispatch table into registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Addu(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ li(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -923,42 +1050,41 @@
__ lbu(a0, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ lw(at, MemOperand(at));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
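+ // Remember the pc offset of the point right after the Call above so that
+ // Generate_InterpreterEnterBytecodeDispatch below can reconstruct the
+ // return address when re-entering the dispatch loop.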
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in v0.
+
+ // Get the arguments + receiver count.
+ __ lw(t0, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ lw(t0, FieldMemOperand(t0, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ // Drop receiver + arguments and return.
+ __ Addu(sp, sp, t0);
+ __ Jump(ra);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
__ Branch(&bytecode_array_loaded);
-}
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
-
- // The return value is in accumulator, which is already in v0.
-
- // Leave the frame (also dropping the register file).
+ // If the bytecode array is no longer present, then the underlying function
+ // has been switched to a different kind of code and we heal the closure by
+ // switching the code entry field over to the new code object as well.
+ __ bind(&bytecode_array_not_present);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
- // Drop receiver + arguments and return.
- __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ Addu(sp, sp, at);
- __ Jump(ra);
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(a1, t0, t1);
+ __ Jump(t0);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -991,7 +1117,6 @@
RelocInfo::CODE_TARGET);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1022,25 +1147,24 @@
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
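+ // The trampoline is a tagged Code object; skip the header and add the
+ // recorded offset to obtain the raw return address.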
+ __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+ __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ Addu(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ lw(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ lw(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1054,9 +1178,7 @@
// Get the target bytecode offset from the frame.
__ lw(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1065,63 +1187,164 @@
__ lbu(a1, MemOperand(a1));
__ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ lw(a1, MemOperand(a1));
- __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ // First look up code; maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register argument_count = a0;
+ Register closure = a1;
+ Register new_target = a3;
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = a0;
+ Register index = a2;
+ __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
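+ // Slot 0 of the map is the context-independent entry, so a length below
+ // two means there are no context-dependent entries to search.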
+ __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
+ // Find literals.
+ // a3 : native context
+ // a2 : length / index
+ // a0 : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = a3;
+ __ lw(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = a1;
+ Register array_pointer = t1;
+
+ // Does the native context match?
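+ // index is a Smi, so scale it by kPointerSizeLog2 - kSmiTagSize to get a
+ // byte offset into the map.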
+ __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(array_pointer, map, Operand(at));
+ __ lw(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+ // OSR id set to none?
+ __ lw(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+ // Literals available?
+ __ lw(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ lw(t0, MemOperand(sp, 0));
+ __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+ __ push(index);
+ __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = t0;
+ __ lw(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, t1);
+
+ // Link the closure into the optimized function list.
+ // t0 : code entry
+ // a3 : native context
+ // a1 : closure
+ __ lw(t1,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ sw(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(t1, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, t1);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ Jump(entry);
+
+ __ bind(&loop_bottom);
+ __ Subu(index, index,
+ Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context-free optimized code map entry.
+ __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ And(t1, t1, Operand(Code::KindField::kMask));
+ __ srl(t1, t1, Code::KindField::kShift);
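+ // A BUILTIN code kind means the shared code is still the lazy-compile
+ // stub, so there is nothing to install.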
+ __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
+ // Yes, install the full code.
+ __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, t1);
+ __ Jump(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
@@ -1264,15 +1487,17 @@
__ SmiUntag(t2);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ Branch(&with_tos_register,
- ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Branch(&with_tos_register, ne, t2,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot Addu will emit one instruction.
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ bind(&with_tos_register);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
__ lw(v0, MemOperand(sp, 1 * kPointerSize));
- __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+ __ Branch(&unknown_state, ne, t2,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot Addu will emit one instruction.
@@ -1478,28 +1703,6 @@
}
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[4] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ lw(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ lw(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ DropAndRet(2);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -2469,6 +2672,30 @@
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : requested object size (untagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ SmiTag(a0);
+ __ Push(a0);
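+ // A zero Smi in cp signals to the runtime that there is no context.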
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : requested object size (untagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ SmiTag(a0);
+ __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(a0, a1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.