Upgrade to 3.29
Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.
Bug: 17370214
Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 4e037ff..194d8a6 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,37 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
+#include "src/code-factory.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
namespace v8 {
namespace internal {
@@ -44,56 +22,110 @@
CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
+ // -- rax : number of arguments excluding receiver
+ // -- rdi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
// -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
- // -- rsp[8 * (argc +1)] : receiver
+ // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * (argc + 1)] : receiver
// -----------------------------------
// Insert extra arguments.
int num_extra_args = 0;
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
- __ pop(kScratchRegister); // Save return address.
- __ push(rdi);
- __ push(kScratchRegister); // Restore return address.
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(rdi);
+ __ PushReturnAddressFrom(kScratchRegister);
} else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
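
// [Editor's sketch -- illustrative only, not part of this patch.]
// The PopReturnAddressTo/PushReturnAddressFrom pair above inserts an
// argument *underneath* the return address. A minimal C++ model of the
// stack effect (hypothetical vector-as-stack, top of stack == back()):
#include <cstdint>
#include <vector>
void InsertUnderReturnAddress(std::vector<uintptr_t>& stack,
                              uintptr_t extra_arg) {
  uintptr_t ret = stack.back();  // PopReturnAddressTo(kScratchRegister)
  stack.pop_back();
  stack.push_back(extra_arg);    // Push(rdi)
  stack.push_back(ret);          // PushReturnAddressFrom(kScratchRegister)
}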
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ Push(rdi);
+ // Function is also the parameter to the runtime call.
+ __ Push(rdi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore receiver.
+ __ Pop(rdi);
+}
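
// [Editor's sketch -- illustrative only, not part of this patch.]
// The function is pushed twice above: one copy survives the runtime call
// and is popped back into rdi, the other is the call's single argument.
// A hedged pseudo-C++ model (call_runtime pops its own argument):
#include <vector>
void CallRuntimePassFunctionModel(std::vector<void*>& stack, void*& rdi,
                                  void (*call_runtime)(std::vector<void*>&)) {
  stack.push_back(rdi);  // copy that survives the call
  stack.push_back(rdi);  // argument consumed by call_runtime
  call_runtime(stack);   // CallRuntime(function_id, 1)
  rdi = stack.back();    // Pop(rdi): restore from the surviving copy
  stack.pop_back();
}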
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ movp(kScratchRegister,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
+ __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ jmp(kScratchRegister);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
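
// [Editor's sketch -- illustrative only, not part of this patch.]
// The compromise described above in C++ terms: V8 lowers the stack limit
// to request an interrupt, so "rsp below the limit" doubles as a cheap cue
// that it is worth asking whether the optimized code is ready:
#include <cstdint>
bool ShouldTryInstallOptimizedCode(uintptr_t rsp, uintptr_t stack_limit) {
  return rsp < stack_limit;  // above_equal in the stub skips the check
}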
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
+ // -- rbx: allocation site or undefined
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ DCHECK(!is_api_function || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
// Push the function to invoke on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -101,20 +133,18 @@
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ Move(kScratchRegister, debug_step_in_fp);
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
-#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
- ASSERT(kSmiTag == 0);
+ DCHECK(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
// rdi: constructor
// rax: initial map (if proven valid below)
@@ -129,66 +159,103 @@
__ CmpInstanceType(rax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCount::kShift +
+ Map::ConstructionCount::kSize == 32);
+ // Check if slack tracking is enabled.
+ __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
+ __ shrl(rsi, Immediate(Map::ConstructionCount::kShift));
+ __ j(zero, &allocate); // JSFunction::kNoSlackTracking
// Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ __ subl(FieldOperand(rax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCount::kShift));
- __ push(rax);
- __ push(rdi);
+ __ cmpl(rsi, Immediate(JSFunction::kFinishSlackTracking));
+ __ j(not_equal, &allocate);
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
+ __ Push(rax);
+ __ Push(rdi);
+
+ __ Push(rdi); // constructor
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ pop(rdi);
- __ pop(rax);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ xorl(rsi, rsi); // JSFunction::kNoSlackTracking
__ bind(&allocate);
}
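
// [Editor's sketch -- illustrative only, not part of this patch.]
// Per the STATIC_ASSERTs above, the construction count occupies the
// topmost bits of bit_field3, so one right shift both extracts the counter
// and discards the flag bits below it:
#include <cstdint>
uint32_t ConstructionCount(uint32_t bit_field3, int shift) {
  // shift == Map::ConstructionCount::kShift; 0 means kNoSlackTracking.
  return bit_field3 >> shift;
}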
// Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
+ __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shlp(rdi, Immediate(kPointerSizeLog2));
+ if (create_memento) {
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
+ }
// rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ Allocate(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
+ Factory* factory = masm->isolate()->factory();
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ // rdi: start of next object (including memento if create_memento)
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ // rdi: start of next object (including memento if create_memento)
+ // rsi: slack tracking counter (non-API function case)
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ movzxbq(rsi,
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ cmpl(rsi, Immediate(JSFunction::kNoSlackTracking));
+ __ j(equal, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
+ __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
+ __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ if (create_memento) {
+ __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+
+ // Fill in memento fields if necessary.
+ // rsi: points to the allocated but uninitialized memento.
+ __ Move(Operand(rsi, AllocationMemento::kMapOffset),
+ factory->allocation_memento_map());
+ // Get the cell or undefined.
+ __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), rdx);
+ } else {
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ }
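
// [Editor's sketch -- illustrative only, not part of this patch.]
// The memento lives in the same allocation, directly after the object's
// fields: rdi was bumped by AllocationMemento::kSize up front, so the
// memento starts at rdi - kSize. Assumed two-word layout:
struct AllocationMementoModel {
  void* map;              // factory->allocation_memento_map()
  void* allocation_site;  // copied from the slot saved at rsp[2 * 8]
};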
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -197,7 +264,7 @@
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -205,30 +272,30 @@
// rbx: JSObject
// rdi: start of next object
// Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
+ __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
+ __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
+ __ Assert(positive, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
// rbx: JSObject
// rdi: start of next object (will be start of FixedArray)
// rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ Allocate(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// rbx: JSObject
@@ -236,9 +303,9 @@
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ movp(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movp(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
@@ -247,13 +314,13 @@
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
+ __ movp(Operand(rcx, 0), rdx);
+ __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
@@ -261,8 +328,8 @@
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
// Continue with JSObject being successfully allocated
@@ -280,62 +347,93 @@
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize*2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
+ // Must restore rsi (context) and rdi (constructor) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 1);
+ }
+ __ movp(rbx, rax); // store result in rbx
+
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ movp(rcx, Operand(rsp, kPointerSize*2));
+ __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // rcx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ SmiAddConstant(
+ FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
+ Smi::FromInt(1));
+ __ bind(&count_incremented);
+ }
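
// [Editor's sketch -- assumes the default x64 smi encoding of this V8
// version (32-bit payload in the upper half of the word, zero tag bits
// below); illustrative only, not part of this patch.] With that encoding,
// SmiAddConstant(..., Smi::FromInt(1)) is a single add of the encoded
// constant, no untagging required:
#include <cstdint>
uint64_t SmiAddOne(uint64_t smi) {
  const int kSmiShift = 32;  // assumption for this x64 configuration
  return smi + (static_cast<uint64_t>(1) << kSmiShift);  // Smi::FromInt(1)
}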
+
// Retrieve the function from the stack.
- __ pop(rdi);
+ __ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ __ Push(rbx);
+ __ Push(rbx);
// Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
@@ -353,33 +451,28 @@
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
__ ret(0);
}
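
// [Editor's sketch -- illustrative only, not part of this patch.]
// The epilogue above pops the return address, steps rsp past the argc
// arguments plus the receiver (SmiToIndex turns the smi count into a
// byte-scaled index), and pushes the return address back before ret:
#include <cstdint>
uintptr_t* DropCallerArguments(uintptr_t* rsp, int argc) {
  uintptr_t ret = *rsp++;  // PopReturnAddressTo(rcx)
  rsp += argc + 1;         // leap(rsp, ...): skip arguments and receiver
  *--rsp = ret;            // PushReturnAddressFrom(rcx)
  return rsp;
}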
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -390,6 +483,8 @@
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Expects five C++ function parameters.
// - Address entry (ignored)
// - JSFunction* function (
@@ -407,10 +502,10 @@
#ifdef _WIN64
// MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
// [rsp+0x20] : argv
// Clear the context before we push it when entering the internal frame.
@@ -419,19 +514,19 @@
FrameScope scope(masm, StackFrame::INTERNAL);
// Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ __ Push(rdx);
+ __ Push(r8);
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
+ __ movp(rax, r9);
// Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ __ movp(kScratchRegister, Operand(rbp, 0));
+ __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ __ movp(rdi, rdx);
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
@@ -440,7 +535,7 @@
// rcx : argc
// r8 : argv
- __ movq(rdi, rsi);
+ __ movp(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the internal frame.
@@ -449,19 +544,19 @@
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ Push(rdi);
+ __ Push(rdx);
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ __ movp(rax, rcx);
+ __ movp(rbx, r8);
#endif // _WIN64
// Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
+ // [rsp + 2 * kPointerSize ... ] : Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
@@ -475,23 +570,24 @@
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
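
// [Editor's sketch -- illustrative only, not part of this patch.]
// Each argv slot holds a handle, i.e. a pointer to an object slot, hence
// the double load in the loop above: fetch the handle, then push what it
// points at:
#include <cstdint>
#include <vector>
void PushHandleArguments(uintptr_t* const* argv, int argc,
                         std::vector<uintptr_t>& js_stack) {
  for (int i = 0; i < argc; ++i) {
    js_stack.push_back(*argv[i]);  // dereference handle, push the object
  }
}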
// Invoke the code.
if (is_construct) {
+ // No type feedback cell is available
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the internal frame. Notice that this also removes the empty
// context and the function left on the stack by the code
@@ -513,57 +609,140 @@
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+static void CallCompileOptimized(MacroAssembler* masm,
+ bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ Push(rdi);
+ // Function is also the parameter to the runtime call.
+ __ Push(rdi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(rdi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ subp(Operand(rsp, 0), Immediate(5));
+ __ Pushad();
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ }
+ __ Popad();
+ __ ret(0);
+}
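
// [Editor's sketch -- illustrative only, not part of this patch.]
// The subp at the top of this stub rewinds the on-stack return address by
// 5 bytes -- assumed here to be the length of the x64 `call rel32` emitted
// by the code-age sequence -- so that `ret` re-executes the freshly
// patched young prologue instead of resuming after it:
#include <cstdint>
void RewindReturnAddress(uintptr_t* return_address_slot) {
  const int kAssumedCodeAgeCallLength = 5;  // call rel32 == 5 bytes
  *return_address_slot -= kAssumedCodeAgeCallLength;
}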
+
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ Pushad();
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ Popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ PopReturnAddressTo(kScratchRegister);
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ Pushad();
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ Popad();
// Tear down internal frame.
}
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+ __ DropUnderReturnAddress(1); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
@@ -581,76 +760,67 @@
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
__ bind(&not_tos_rax);
- __ Abort("no cases left");
+ __ Abort(kNoCasesLeft);
}
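
// [Editor's sketch -- illustrative only, not part of this patch.]
// `ret n` pops the return address and then frees n extra bytes of stack;
// that is how the helper above discards the state word (and, for TOS_REG,
// the saved rax slot) that the deoptimizer left behind:
#include <cstdint>
uintptr_t RetAndDrop(uintptr_t*& rsp, int extra_bytes) {
  uintptr_t pc = *rsp++;                   // pop return address
  rsp += extra_bytes / sizeof(uintptr_t);  // drop deoptimizer state slots
  return pc;                               // continue at the caller
}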
+
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
+ // rsp[0] : Return address
+ // rsp[8] : Argument n
+ // rsp[16] : Argument n-1
// ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
+ // rsp[8 * n] : Argument 1
+ // rsp[8 * (n + 1)] : Receiver (function to call)
//
// rax contains the number of arguments, n, not counting the receiver.
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
- __ push(rbx);
- __ incq(rax);
+ __ PushReturnAddressFrom(rbx);
+ __ incp(rax);
__ bind(&done);
}
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, rax);
+ __ movp(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -658,12 +828,12 @@
// 3a. Patch the first argument if necessary when calling a function.
Label shift_arguments;
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
+ { Label convert_to_object, use_global_proxy, patch_receiver;
// Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
@@ -674,14 +844,14 @@
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
- // Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ // Compute the receiver in sloppy mode.
+ __ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
+ __ j(equal, &use_global_proxy);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
+ __ j(equal, &use_global_proxy);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
@@ -692,33 +862,28 @@
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
+ __ Pop(rax);
__ SmiToInteger32(rax, rax);
}
// Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ movp(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ bind(&use_global_proxy);
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset));
__ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+ __ movp(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -735,37 +900,37 @@
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ movp(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
__ bind(&shift_arguments);
{ Label loop;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
+ StackArgumentsAccessor args(rsp, rcx);
__ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ movp(args.GetArgumentOperand(0), rbx);
+ __ decp(rcx);
+ __ j(not_zero, &loop); // While non-zero.
+ __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
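
// [Editor's sketch -- illustrative only, not part of this patch.]
// The loop above slides every argument one stack slot so the original
// first argument lands in the receiver slot; the duplicated slot under the
// return address is then dropped. In array form, with slot[argc] holding
// the receiver and slot[argc - 1]..slot[0] the arguments first-to-last:
#include <cstdint>
void ShiftArgumentsTowardReceiver(uintptr_t* slot, int argc) {
  for (int i = argc; i >= 1; --i) {
    slot[i] = slot[i - 1];  // old first argument becomes the receiver
  }
}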
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
- __ pop(rdx); // return address
- __ push(rdi); // re-add proxy object as additional argument
- __ push(rdx);
- __ incq(rax);
+ __ PopReturnAddressTo(rdx);
+ __ Push(rdi); // re-add proxy object as additional argument
+ __ PushReturnAddressFrom(rdx);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -780,43 +945,40 @@
// 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register edx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rax, rbx);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
+ // rsp : return address
+ // rsp[8] : arguments
+ // rsp[16] : receiver ("this")
+ // rsp[24] : function
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+ // rbp : Old base pointer
+ // rbp[8] : return address
+ // rbp[16] : function arguments
+ // rbp[24] : receiver
+ // rbp[32] : function
+ static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
@@ -824,21 +986,21 @@
// limit" is checked.
Label okay;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
+ __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
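
// [Editor's sketch -- illustrative only, not part of this patch.]
// "Space left" is rsp - real_stack_limit and may already be negative when
// the stack has overflowed, so the comparison against the unrolled array
// size must be signed -- hence j(greater) rather than j(above):
#include <cstdint>
bool ArgumentsFitOnStack(intptr_t rsp, intptr_t real_stack_limit,
                         intptr_t needed_bytes) {
  intptr_t space_left = rsp - real_stack_limit;  // may be negative
  return space_left > needed_bytes;              // signed comparison
}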
@@ -846,24 +1008,24 @@
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
// Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ movp(rbx, Operand(rbp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &push_receiver);
// Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ Label call_to_object, use_global_proxy;
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
@@ -873,12 +1035,12 @@
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
+ __ j(equal, &use_global_proxy);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
+ __ j(equal, &use_global_proxy);
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
@@ -888,34 +1050,34 @@
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ bind(&use_global_proxy);
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset));
// Push the receiver.
__ bind(&push_receiver);
- __ push(rbx);
+ __ Push(rbx);
// Copy all arguments from the array to the stack.
Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ __ movp(key, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ movp(receiver, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ if (FLAG_vector_ics) {
+ __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(0));
+ }
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
__ Call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
@@ -923,36 +1085,34 @@
// case, we know that we are not generating a test instruction next.
// Push the nth argument.
- __ push(rax);
+ __ Push(rax);
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ // Update the index on the stack and in register key.
+ __ movp(key, Operand(rbp, kIndexOffset));
+ __ SmiAddConstant(key, key, Smi::FromInt(1));
+ __ movp(Operand(rbp, kIndexOffset), key);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(key, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
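
// [Editor's sketch -- illustrative only, not part of this patch.]
// The copy loop above is effectively the following, with each element load
// funneled through the KeyedLoadIC so fast-mode arrays stay on the fast
// path (keyed_load stands in for the IC call; hypothetical signature):
#include <cstdint>
#include <vector>
void PushApplyArguments(uintptr_t arguments_object, int limit,
                        uintptr_t (*keyed_load)(uintptr_t object, int key),
                        std::vector<uintptr_t>& js_stack) {
  for (int key = 0; key != limit; ++key) {
    js_stack.push_back(keyed_load(arguments_object, key));
  }
}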
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ SmiToInteger32(rax, key);
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ Push(rdi); // add function proxy as last argument
+ __ incp(rax);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -963,374 +1123,9 @@
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ movq(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- scratch3);
- __ bind(&entry);
- __ decq(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch, elements_array);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ testq(array_size, array_size);
- __ Assert(not_zero, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, factory->empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- call_generic_code);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
-
- __ SmiTest(rdx);
- __ j(not_zero, &not_empty_array);
- __ pop(r8); // Adjust stack.
- __ Drop(1);
- __ push(r8);
- __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0]: return address
- // esp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
- __ Integer32ToSmi(rdx, rdx); // Convert argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0] : return address
- // esp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // esp[0]: return address
- // esp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
- // Location of the first array element (parameter fill_with_hole to
- // AllocateJSArray is false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // esp[0]: return address
- // esp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r8, &has_non_smi_element);
- }
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // rsp[0]: return address
- // rsp[8]: last argument
- __ bind(&finish);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(r8,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(rbx);
- __ jmp(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
- // rbx: JSArray
- __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- r11,
- kScratchRegister,
- &cant_transition_map);
-
- __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
- __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
+ // -- rax : argc
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -1341,31 +1136,26 @@
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// The smi check below will catch both a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for InternalArray function");
+ __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for InternalArray function");
+ __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
+ // -- rax : argc
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -1376,58 +1166,20 @@
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// The smi check below will catch both a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // The smi check below will catch both a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
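+ // rbx carries the AllocationSite feedback expected by the stub; undefined
+ // here means no feedback is available.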
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -1444,31 +1196,29 @@
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
- __ Assert(equal, "Unexpected String function");
+ __ cmpp(rdi, rcx);
+ __ Assert(equal, kUnexpectedStringFunction);
}
// Load the first argument into rax and get rid of the rest
// (including the receiver).
+ StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
// Look up the argument in the number-to-string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
+ __ LookupNumberStringCache(rax, // Input.
+ rbx, // Result.
+ rcx, // Scratch 1.
+ rdx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
@@ -1480,31 +1230,31 @@
// Allocate a JSValue and put the tagged pointer into rax.
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- rax, // Result.
- rcx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ rax, // Result.
+ rcx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
// Set the map.
__ LoadGlobalFunctionInitialMap(rdi, rcx);
if (FLAG_debug_code) {
__ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
Immediate(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected string wrapper instance size");
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
- __ Assert(equal, "Unexpected unused properties of string wrapper");
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
// Set properties and elements.
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
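+ // The wrapper now has all four of its tagged slots initialized:
+ //   [map][properties][elements][value]  (hence kSize == 4 * kPointerSize).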
// Ensure the object is fully initialized.
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -1520,7 +1270,7 @@
__ JumpIfSmi(rax, &convert_argument);
Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
__ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ IncrementCounter(counters->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
@@ -1529,21 +1279,21 @@
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
+ __ Push(rdi); // Preserve the function.
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&argument_is_string);
// Load the empty string into rbx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kEmptyStringRootIndex);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
- __ push(rcx);
+ __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
// At this point the argument is already a string. Call runtime to
@@ -1552,44 +1302,70 @@
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
+ __ Push(rbx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ ret(0);
}
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- rax : actual number of arguments
+ // -- rbx : expected number of arguments
+ // -- rdi : function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here, which will cause rcx to become negative.
+ __ subp(rcx, rdx);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ movp(rdx, rbx);
+ __ shlp(rdx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rdx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
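+ // In effect (a sketch, in pointer-sized integer arithmetic):
+ //   if (rsp - real_stack_limit <= expected_args << kPointerSizeLog2)
+ //     goto stack_overflow;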
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
- __ push(r8);
+ __ Push(r8);
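+ // The adaptor frame now looks like this (a sketch; 8-byte slots):
+ //   rbp[+8] : return address
+ //   rbp[ 0] : saved rbp
+ //   rbp[-8] : ARGUMENTS_ADAPTOR sentinel (smi)
+ //   rbp[-16]: function
+ //   rbp[-24]: number of arguments (smi)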
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
// Remove caller arguments from the stack.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ PushReturnAddressFrom(rcx);
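+ // Net effect: rsp += (argc + 1) * kPointerSize, dropping the arguments and
+ // the receiver while keeping the return address on top of the stack.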
}
@@ -1597,18 +1373,21 @@
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rcx : call kind information
- // -- rdx : code entry to call
+ // -- rdi : function (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->arguments_adaptors(), 1);
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
Label enough, too_few;
- __ cmpq(rax, rbx);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
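+ // rdx now holds the code entry point, loaded from the function itself
+ // rather than passed in by the caller (see the updated state comment above).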
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
@@ -1617,15 +1396,15 @@
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(Operand(rax, 0));
+ __ subp(rax, Immediate(kPointerSize));
+ __ cmpp(r8, rbx);
+ __ j(less, &copy);
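+ // The loop above as pseudocode (rbx + 1 slots are copied: the receiver
+ // plus the `expected` arguments):
+ //   r8 = -1;
+ //   do { r8++; Push(*rax); rax -= kPointerSize; } while (r8 < rbx);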
__ jmp(&invoke);
}
@@ -1636,28 +1415,28 @@
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ incp(r8);
+ __ Push(Operand(rdi, 0));
+ __ subp(rdi, Immediate(kPointerSize));
+ __ cmpp(r8, rax);
+ __ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(kScratchRegister);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
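+ // Same do-while shape as the copy loop above: undefined is pushed until
+ // the new frame holds `expected` arguments in total.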
// Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// Call the entry point.
@@ -1676,65 +1455,67 @@
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(rdx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3(); // Unreachable: the STACK_OVERFLOW builtin throws.
+ }
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(rax, depth) instruction right after the call.
- Label stack_check;
- __ movq(rbx, Operand(rsp, 0)); // return address
- __ movzxbq(rbx, Operand(rbx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
+ // Look up the function in the JavaScript frame.
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
+ // Pass function as argument.
+ __ Push(rax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
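+ // rax = code_obj + Code::kHeaderSize - kHeapObjectTag + osr_pc_offset,
+ // i.e. the raw address of the OSR entry point in the optimized code.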
+
+ // Overwrite the return address on the stack.
+ __ movq(StackOperandForReturnAddress(0), rax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
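+ // If the limit was hit, a stack guard interrupt is pending; call into the
+ // runtime to service it, then retry OSR via the OnStackReplacement builtin.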
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
-
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
+ __ j(above_equal, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
}
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
__ bind(&ok);
__ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
}