Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index e773b53..a1e9207 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/bootstrapper.h"
@@ -11,11 +9,15 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/arm64/code-stubs-arm64.h"
+#include "src/arm64/frames-arm64.h"
+
namespace v8 {
namespace internal {
@@ -35,7 +37,7 @@
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -69,7 +71,7 @@
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -101,17 +103,17 @@
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
- x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
+ queue.Queue(descriptor.GetRegisterParameter(i));
}
queue.PushQueued();
@@ -202,13 +204,11 @@
// See call site for description.
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch,
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
+ Register right, Register scratch,
FPRegister double_scratch,
- Label* slow,
- Condition cond) {
+ Label* slow, Condition cond,
+ Strength strength) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -220,18 +220,46 @@
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
+ Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
- __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
- ge);
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
+ slow, ge);
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ Cmp(right_type, SYMBOL_TYPE);
+ __ B(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Cmp(right_type, SIMD128_VALUE_TYPE);
+ __ B(eq, slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics, since
+ // we need to throw a TypeError. Smis have already been ruled out.
+ __ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
+ __ B(eq, &return_equal);
+ __ Tst(right_type, Operand(kIsNotStringMask));
+ __ B(ne, slow);
+ }
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
- Register right_type = scratch;
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(ge, slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ Cmp(right_type, SYMBOL_TYPE);
+ __ B(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Cmp(right_type, SIMD128_VALUE_TYPE);
+ __ B(eq, slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ Tst(right_type, Operand(kIsNotStringMask));
+ __ B(ne, slow);
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
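Editor's note: a minimal C++ model of the fast-path decision the new code makes when both operands are the same reference — Symbols and SIMD values must reach the runtime so it can throw a TypeError, and JS receivers may have exotic comparison behaviour. The enum and function names below are illustrative, not V8 types.

```cpp
// Illustrative model only -- not V8 code. Mirrors the branch structure of
// EmitIdenticalObjectComparison for ordering comparisons (lt/gt) above.
enum class InstanceType { HeapNumber, String, Symbol, Simd128, JSReceiver, Oddball };
enum class FastPath { ReturnEqual, CallRuntime };

FastPath OrderingOnIdenticalOperands(InstanceType type) {
  // Identical JS receivers, Symbols and SIMD values all defer to the
  // runtime: receivers for exotic behaviour, the latter two so a TypeError
  // can be thrown -- exactly the JumpIfObjectType/Cmp/B(eq, slow) sequence.
  if (type == InstanceType::JSReceiver || type == InstanceType::Symbol ||
      type == InstanceType::Simd128) {
    return FastPath::CallRuntime;
  }
  // Anything else that is identical compares equal on the fast path
  // (NaN, which compares unequal to itself, is handled separately).
  return FastPath::ReturnEqual;
}
```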
@@ -308,10 +336,10 @@
// If either operand is a JS object or an oddball value, then they are not
// equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label right_non_object;
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, &right_non_object);
// Return non-zero - x0 already contains a non-zero pointer.
@@ -328,9 +356,9 @@
// If right is not ODDBALL, test left. Otherwise, set eq condition.
__ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
- // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
// Otherwise, right or left is ODDBALL, so set a ge condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+ __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);
__ B(ge, &return_not_equal);
@@ -443,11 +471,11 @@
__ Bind(&object_test);
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
- // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
- // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+ // If right >= FIRST_JS_RECEIVER_TYPE, test left.
+ // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);
__ B(lt, not_both_strings);
@@ -508,7 +536,8 @@
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
+ strength());
// If either is a smi (we know that at least one is not a smi), then they can
// only be strictly equal if the other is a HeapNumber.
@@ -623,11 +652,9 @@
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
if (cond == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
- native = Builtins::COMPARE;
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
@@ -637,11 +664,12 @@
}
__ Mov(x10, Smi::FromInt(ncr));
__ Push(x10);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
+ }
__ Bind(&miss);
GenerateMiss(masm);
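Editor's note: the `ncr` ("NaN compare result") value pushed in the hunk above tells the runtime what to answer when an operand is NaN; choosing GREATER for `<`/`<=` and LESS for `>`/`>=` makes every ordering comparison involving NaN come out false. A one-line sketch, with the enumerators assumed to mirror V8's LESS/EQUAL/GREATER result tokens:

```cpp
// Sketch only: assumed constants mirroring V8's comparison result tokens.
enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// For cond in {lt, le} the stub pushes GREATER, otherwise LESS, so that
// "NaN < x", "NaN <= x", "NaN > x" and "NaN >= x" all evaluate to false.
int NanCompareResult(bool cond_is_lt_or_le) {
  return cond_is_lt_or_le ? GREATER : LESS;
}
```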
@@ -936,7 +964,7 @@
__ Bind(&call_runtime);
// Put the arguments back on the stack.
__ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// Return.
__ Bind(&done);
@@ -973,10 +1001,13 @@
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+ StoreFastElementStub::GenerateAheadOfTime(isolate);
+ TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1034,6 +1065,8 @@
// Register parameters:
// x0: argc (including receiver, untagged)
// x1: target
+ // If argv_in_register():
+ // x11: argv (pointer to first argument)
//
// The stack on entry holds the arguments and the receiver, with the receiver
// at the highest address:
@@ -1065,9 +1098,11 @@
// (arg[argc-2]), or just below the receiver in case there are no arguments.
// - Adjust for the arg[] array.
Register temp_argv = x11;
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
- // - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ if (!argv_in_register()) {
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ }
// Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
// registers.
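Editor's note: when `argv_in_register()` is false, the stub derives argv from jssp and the untagged argc (which includes the receiver), so argv ends up pointing at the last real argument just below the receiver. A hedged sketch of that address arithmetic, assuming kPointerSize is 8 on arm64:

```cpp
// Illustrative pointer arithmetic only -- mirrors the Add/Sub pair above.
#include <cstdint>

uint64_t* ComputeArgv(uint64_t* jssp, uint64_t argc /* includes receiver */) {
  // temp_argv = jssp + argc * kPointerSize, then back up one slot so it
  // points at the last real argument rather than the receiver.
  return jssp + argc - 1;  // C++ pointer arithmetic already scales by 8
}
```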
@@ -1171,12 +1206,10 @@
__ LeaveExitFrame(save_doubles(), x10, true);
DCHECK(jssp.Is(__ StackPointer()));
- // Pop or drop the remaining stack slots and return from the stub.
- // jssp[24]: Arguments array (of size argc), including receiver.
- // jssp[16]: Preserved x23 (used for target).
- // jssp[8]: Preserved x22 (used for argc).
- // jssp[0]: Preserved x21 (used for argv).
- __ Drop(x11);
+ if (!argv_in_register()) {
+ // Drop the remaining stack slots and return from the stub.
+ __ Drop(x11);
+ }
__ AssertFPCRState();
__ Ret();
@@ -1187,28 +1220,29 @@
// Handling of exception.
__ Bind(&exception_returned);
- // Retrieve the pending exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
- const Register& exception = result;
- const Register& exception_address = x11;
- __ Mov(exception_address, Operand(pending_exception_address));
- __ Ldr(exception, MemOperand(exception_address));
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
- // Clear the pending exception.
- __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
- __ Str(x10, MemOperand(exception_address));
-
- // x0 exception The exception descriptor.
- // x21 argv
- // x22 argc
- // x23 target
-
- // Special handling of termination exceptions, which are uncatchable by
- // JavaScript code.
- Label throw_termination_exception;
- __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
- __ B(eq, &throw_termination_exception);
+ // Ask the runtime for help to determine the handler. This will set x0 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
+ isolate());
+ DCHECK(csp.Is(masm->StackPointer()));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Mov(x0, 0); // argc.
+ __ Mov(x1, 0); // argv.
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+ __ CallCFunction(find_handler, 3);
+ }
// We didn't execute a return case, so the stack frame hasn't been updated
// (except for the return address slot). However, we don't need to initialize
@@ -1216,18 +1250,29 @@
// unwinds the stack.
__ SetStackPointer(jssp);
- ASM_LOCATION("Throw normal");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- __ Throw(x0, x10, x11, x12, x13);
+ // Retrieve the handler context, SP and FP.
+ __ Mov(cp, Operand(pending_handler_context_address));
+ __ Ldr(cp, MemOperand(cp));
+ __ Mov(jssp, Operand(pending_handler_sp_address));
+ __ Ldr(jssp, MemOperand(jssp));
+ __ Mov(fp, Operand(pending_handler_fp_address));
+ __ Ldr(fp, MemOperand(fp));
- __ Bind(&throw_termination_exception);
- ASM_LOCATION("Throw termination");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- __ ThrowUncatchable(x0, x10, x11, x12, x13);
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label not_js_frame;
+ __ Cbz(cp, &not_js_frame);
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&not_js_frame);
+
+ // Compute the handler entry address and jump to it.
+ __ Mov(x10, Operand(pending_handler_code_address));
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, Operand(pending_handler_offset_address));
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, x11);
+ __ Br(x10);
}
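Editor's note: the old path cleared the pending exception itself and special-cased termination; the new one asks Runtime::kUnwindAndFindExceptionHandler to pick the handler and record its frame state in per-isolate fields, which the stub then restores before branching. A minimal sketch of the entry-address computation at the end of the hunk, with stand-in constants rather than V8's real layout:

```cpp
// Illustrative model -- the struct and constants are stand-ins, not V8's
// Isolate fields, but the arithmetic matches the Add/Add/Br sequence above.
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kHeapObjectTag = 1;   // low tag carried by heap pointers
constexpr uintptr_t kCodeHeaderSize = 32; // stand-in for Code::kHeaderSize

struct PendingHandler {      // filled in by kUnwindAndFindExceptionHandler
  uintptr_t code;            // tagged Code object containing the handler
  uintptr_t offset;          // byte offset of the handler in that code
};

uintptr_t HandlerEntry(const PendingHandler& h) {
  // Untag the Code pointer, skip its header to reach the instruction
  // stream, then add the handler's offset.
  return h.code + kCodeHeaderSize - kHeapObjectTag + h.offset;
}

int main() {
  PendingHandler h{0x1001, 0x40};
  printf("entry = %#lx\n", (unsigned long)HandlerEntry(h));
  return 0;
}
```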
@@ -1332,10 +1377,9 @@
__ LoadRoot(x0, Heap::kExceptionRootIndex);
__ B(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ Bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the B(&invoke) above, which
// restores all callee-saved registers (including cp and fp) to their
@@ -1369,7 +1413,7 @@
__ Blr(x12);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ Bind(&exit);
@@ -1414,9 +1458,8 @@
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@@ -1436,9 +1479,8 @@
Register result = x0;
Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -1453,7 +1495,7 @@
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ Bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1461,183 +1503,116 @@
}
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Stack on entry:
- // jssp[0]: function.
- // jssp[8]: object.
- //
- // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
- // instanceof.
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = x1; // Object (lhs).
+ Register const function = x0; // Function (rhs).
+ Register const object_map = x2; // Map of {object}.
+ Register const function_map = x3; // Map of {function}.
+ Register const function_prototype = x4; // Prototype of {function}.
+ Register const scratch = x5;
- Register result = x0;
- Register function = right();
- Register object = left();
- Register scratch1 = x6;
- Register scratch2 = x7;
- Register res_true = x8;
- Register res_false = x9;
- // Only used if there was an inline map check site. (See
- // LCodeGen::DoInstanceOfKnownGlobal().)
- Register map_check_site = x4;
- // Delta for the instructions generated between the inline map check and the
- // instruction setting the result.
- const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
- Label not_js_object, slow;
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
- if (!HasArgsInRegisters()) {
- __ Pop(function, object);
- }
-
- if (ReturnTrueFalseObject()) {
- __ LoadTrueFalseRoots(res_true, res_false);
- } else {
- // This is counter-intuitive, but correct.
- __ Mov(res_true, Smi::FromInt(0));
- __ Mov(res_false, Smi::FromInt(1));
- }
-
- // Check that the left hand side is a JS object and load its map as a side
- // effect.
- Register map = x12;
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
-
- // If there is a call site cache, don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
- __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
- __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
- __ Bind(&miss);
- }
-
- // Get the prototype of the function.
- Register prototype = x13;
- __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
- MacroAssembler::kMissOnBoundFunction);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (HasCallSiteInlineCheck()) {
- // Patch the (relocated) inlined map check.
- __ GetRelocatedValueLocation(map_check_site, scratch1);
- // We have a cell, so need another level of dereferencing.
- __ Ldr(scratch1, MemOperand(scratch1));
- __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
- } else {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- }
-
- Label return_true, return_result;
- Register smi_value = scratch1;
- {
- // Loop through the prototype chain looking for the function prototype.
- Register chain_map = x1;
- Register chain_prototype = x14;
- Register null_value = x15;
- Label loop;
- __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- // Speculatively set a result.
- __ Mov(result, res_false);
- if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
- // Value to store in the cache cannot be an object.
- __ Mov(smi_value, Smi::FromInt(1));
- }
-
- __ Bind(&loop);
-
- // If the chain prototype is the object prototype, return true.
- __ Cmp(chain_prototype, prototype);
- __ B(eq, &return_true);
-
- // If the chain prototype is null, we've reached the end of the chain, so
- // return false.
- __ Cmp(chain_prototype, null_value);
- __ B(eq, &return_result);
-
- // Otherwise, load the next prototype in the chain, and loop.
- __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
- __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
- __ B(&loop);
- }
-
- // Return sequence when no arguments are on the stack.
- // We cannot fall through to here.
- __ Bind(&return_true);
- __ Mov(result, res_true);
- if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
- // Value to store in the cache cannot be an object.
- __ Mov(smi_value, Smi::FromInt(0));
- }
- __ Bind(&return_result);
- if (HasCallSiteInlineCheck()) {
- DCHECK(ReturnTrueFalseObject());
- __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
- __ GetRelocatedValueLocation(map_check_site, scratch2);
- __ Str(result, MemOperand(scratch2));
- } else {
- Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
- __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
- }
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex,
+ &fast_case);
+ __ JumpIfNotRoot(object_map, Heap::kInstanceofCacheMapRootIndex, &fast_case);
+ __ LoadRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- Label object_not_null, object_not_null_or_smi;
-
- __ Bind(&not_js_object);
- Register object_type = x14;
- // x0 result result return register (uninit)
- // x10 function pointer to function
- // x11 object pointer to object
- // x14 object_type type of object (uninit)
-
- // Before null, smi and string checks, check that the rhs is a function.
- // For a non-function rhs, an exception must be thrown.
- __ JumpIfSmi(function, &slow);
- __ JumpIfNotObjectType(
- function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
-
- __ Mov(result, res_false);
-
- // Null is not instance of anything.
- __ Cmp(object, Operand(isolate()->factory()->null_value()));
- __ B(ne, &object_not_null);
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ Bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
+ &slow_case);
+ __ LoadRoot(x0, Heap::kFalseValueRootIndex);
__ Ret();
- __ Bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ Bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
+ &slow_case);
+
+ // Ensure that {function} has an instance prototype.
+ __ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
+
+ // Get the "prototype" (or initial map) of the {function}.
+ __ Ldr(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ JumpIfNotObjectType(function_prototype, scratch, scratch, MAP_TYPE,
+ &function_prototype_valid);
+ __ Ldr(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ Bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
+ Register const null = scratch;
+ Register const result = x0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
+ __ Bind(&loop);
+
+ // Check if the object needs to be access checked.
+ __ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
+ &fast_runtime_fallback);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ B(eq, &fast_runtime_fallback);
+
+ __ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Cmp(object, function_prototype);
+ __ B(eq, &done);
+ __ Cmp(object, null);
+ __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ B(ne, &loop);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- __ Bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch2, &slow);
- __ Ret();
+ // Found Proxy or access check needed: Call the runtime
+ __ Bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ Move(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
- // Slow-case. Tail call builtin.
- __ Bind(&slow);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments have either been passed into registers or have been previously
- // popped. We need to push them before calling builtin.
- __ Push(object, function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- if (ReturnTrueFalseObject()) {
- // Reload true/false because they were clobbered in the builtin call.
- __ LoadTrueFalseRoots(res_true, res_false);
- __ Cmp(result, 0);
- __ Csel(result, res_true, res_false, eq);
- }
- __ Ret();
+ // Slow-case: Call the %InstanceOf runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
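Editor's note: the rewritten stub answers most instanceof checks inline by walking the prototype chain from {object}'s map, falling back to the runtime only for proxies, access-checked objects, and non-JSFunction right-hand sides. A rough C++ model of the loop, with illustrative types rather than V8's heap layout:

```cpp
// Illustrative types only -- not V8's object model.
struct Map;
struct Object { Map* map; };
struct Map {
  Object* prototype;
  bool is_proxy;
  bool needs_access_check;
};

enum class InstanceOfResult { True, False, FallToRuntime };

InstanceOfResult HasInPrototypeChain(Object* object, Object* function_prototype,
                                     Object* null_value) {
  Map* map = object->map;
  for (;;) {
    // Proxies and access-checked objects can intercept the lookup, so the
    // stub punts to Runtime::kHasInPrototypeChain (invalidating the cache).
    if (map->needs_access_check || map->is_proxy)
      return InstanceOfResult::FallToRuntime;
    Object* proto = map->prototype;
    if (proto == function_prototype) return InstanceOfResult::True;
    if (proto == null_value) return InstanceOfResult::False;
    map = proto->map;
  }
}
```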
@@ -1688,15 +1663,18 @@
// the runtime system.
__ Bind(&slow);
__ Push(key);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
+
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -1709,33 +1687,35 @@
__ B(ne, &runtime);
// Patch the arguments.length and parameters pointer in the current frame.
- __ Ldr(x11, MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Poke(x11, 0 * kXRegSize);
- __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
- __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
- __ Poke(x10, 1 * kXRegSize);
+ __ Ldr(x2,
+ MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Add(x3, caller_fp, Operand::UntagSmiAndScale(x2, kPointerSizeLog2));
+ __ Add(x3, x3, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ Push(x1, x3, x2);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
//
// Returns pointer to result object in x0.
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Make an untagged copy of the parameter count.
// Note: arg_count_smi is an alias of param_count_smi.
- Register arg_count_smi = x3;
- Register param_count_smi = x3;
+ Register function = x1;
+ Register arg_count_smi = x2;
+ Register param_count_smi = x2;
+ Register recv_arg = x3;
Register param_count = x7;
- Register recv_arg = x14;
- Register function = x4;
- __ Pop(param_count_smi, recv_arg, function);
__ SmiUntag(param_count, param_count_smi);
// Check if the calling frame is an arguments adaptor frame.
@@ -1751,16 +1731,18 @@
// No adaptor, parameter count = argument count.
- // x1 mapped_params number of mapped params, min(params, args) (uninit)
- // x2 arg_count number of function arguments (uninit)
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped params, min(params, args) (uninit)
// x7 param_count number of function parameters
// x11 caller_fp caller's frame pointer
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments (uninit)
- Register arg_count = x2;
+ Register arg_count = x14;
+ Register mapped_params = x4;
__ Mov(arg_count, param_count);
+ __ Mov(mapped_params, param_count);
__ B(&try_allocate);
// We have an adaptor frame. Patch the parameters pointer.
@@ -1773,7 +1755,6 @@
__ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
// Compute the mapped parameter count = min(param_count, arg_count)
- Register mapped_params = x1;
__ Cmp(param_count, arg_count);
__ Csel(mapped_params, param_count, arg_count, lt);
@@ -1781,13 +1762,13 @@
// x0 alloc_obj pointer to allocated objects: param map, backing
// store, arguments (uninit)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x7 param_count number of function parameters
// x10 size size of objects to allocate (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
// Compute the size of backing store, parameter map, and arguments object.
// 1. Parameter map, has two extra words containing context and backing
@@ -1819,26 +1800,24 @@
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x7 param_count number of function parameters
// x11 sloppy_args_map offset to args (or aliased args) map (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
- Register global_object = x10;
Register global_ctx = x10;
Register sloppy_args_map = x11;
Register aliased_args_map = x10;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
+ __ Ldr(global_ctx, NativeContextMemOperand());
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(aliased_args_map,
- ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
+ __ Ldr(
+ aliased_args_map,
+ ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
__ Cmp(mapped_params, 0);
__ CmovX(sloppy_args_map, aliased_args_map, ne);
@@ -1867,14 +1846,14 @@
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x5 elements pointer to parameter map or backing store (uninit)
// x6 backing_store pointer to backing store (uninit)
// x7 param_count number of function parameters
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Register elements = x5;
__ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
@@ -1916,17 +1895,17 @@
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x5 elements pointer to parameter map or backing store (uninit)
// x6 backing_store pointer to backing store (uninit)
// x7 param_count number of function parameters
// x11 loop_count parameter loop counter (uninit)
// x12 index parameter index (smi, uninit)
// x13 the_hole hole value (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Register loop_count = x11;
Register index = x12;
@@ -1962,12 +1941,12 @@
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x4 function function pointer
- // x3 arg_count_smi number of function arguments (smi)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x6 backing_store pointer to backing store (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Label arguments_loop, arguments_test;
__ Mov(x10, mapped_params);
@@ -1990,7 +1969,7 @@
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -2006,10 +1985,7 @@
// Everything is fine, call runtime.
__ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2018,20 +1994,21 @@
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
//
// Returns pointer to result object in x0.
- // Get the stub arguments from the frame, and make an untagged copy of the
- // parameter count.
- Register param_count_smi = x1;
- Register params = x2;
- Register function = x3;
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Make an untagged copy of the parameter count.
+ Register function = x1;
+ Register param_count_smi = x2;
+ Register params = x3;
Register param_count = x13;
- __ Pop(param_count_smi, params, function);
__ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed.
@@ -2044,9 +2021,9 @@
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &try_allocate);
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
+ // x1 function function pointer
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
// x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
@@ -2074,20 +2051,15 @@
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (native) context.
- Register global_object = x10;
- Register global_ctx = x10;
Register strict_args_map = x4;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
- __ Ldr(strict_args_map,
- ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
+ strict_args_map);
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
+ // x1 function function pointer
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
// x4 strict_args_map offset to arguments map
// x13 param_count number of parameters passed to function
__ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
@@ -2116,9 +2088,9 @@
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
+ // x1 function function pointer
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
// x4 array pointer to array slot (uninit)
// x5 elements pointer to elements array of alloc_obj
// x13 param_count number of parameters passed to function
@@ -2145,13 +2117,61 @@
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
+ // x4 : rest parameter index (tagged)
+ //
+ // Returns pointer to result object in x0.
+
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
+ DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register rest_index_smi = x4;
+ Register param_count_smi = x2;
+ Register params = x3;
+ Register param_count = x13;
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx,
+ MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &runtime);
+
+ // x4 rest_index_smi index of rest parameter
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ __ Bind(&runtime);
+ __ Push(param_count_smi, params, rest_index_smi);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
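Editor's note: RestParamAccessStub only patches the argument count and parameters pointer when the caller went through an arguments adaptor frame; both paths then fall through to Runtime::kNewRestParam, which builds the actual array. A sketch of the patch-up arithmetic, with stand-in frame offsets (not StandardFrameConstants):

```cpp
// Stand-in offsets; the arithmetic mirrors the Ldr/SmiUntag/Add/Add
// sequence above.
#include <cstdint>

constexpr intptr_t kCallerSPOffset = 2 * 8;  // assumed frame-constant stand-in

uint8_t* PatchedParamsPointer(uint8_t* caller_fp, intptr_t untagged_count) {
  // params = caller_fp + count * kPointerSize + kCallerSPOffset
  return caller_fp + untagged_count * 8 + kCallerSPOffset;
}
```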
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2198,27 +2218,16 @@
Register last_match_info_elements = x21;
Register code_object = x22;
- // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
- CPURegList used_callee_saved_registers(subject,
- regexp_data,
- last_match_info_elements,
- code_object);
- __ PushCPURegList(used_callee_saved_registers);
-
// Stack frame.
- // jssp[0] : x19
- // jssp[8] : x20
- // jssp[16]: x21
- // jssp[24]: x22
- // jssp[32]: last_match_info (JSArray)
- // jssp[40]: previous index
- // jssp[48]: subject string
- // jssp[56]: JSRegExp object
+ // jssp[00]: last_match_info (JSArray)
+ // jssp[08]: previous index
+ // jssp[16]: subject string
+ // jssp[24]: JSRegExp object
- const int kLastMatchInfoOffset = 4 * kPointerSize;
- const int kPreviousIndexOffset = 5 * kPointerSize;
- const int kSubjectOffset = 6 * kPointerSize;
- const int kJSRegExpOffset = 7 * kPointerSize;
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
@@ -2368,8 +2377,8 @@
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
// of kPointerSize to reach the latter.
- DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
- JSRegExp::kDataUC16CodeOffset);
+ STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
+ JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: Latin1 = 0x04
// UC16 = 0x00
@@ -2585,7 +2594,6 @@
// Return last match info.
__ Peek(x0, kLastMatchInfoOffset);
- __ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
@@ -2603,29 +2611,17 @@
__ Cmp(x10, exception_value);
__ B(eq, &runtime);
- __ Str(x10, MemOperand(x11)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- Label termination_exception;
- __ JumpIfRoot(exception_value,
- Heap::kTerminationExceptionRootIndex,
- &termination_exception);
-
- __ Throw(exception_value, x10, x11, x12, x13);
-
- __ Bind(&termination_exception);
- __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
- __ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
__ Bind(&runtime);
- __ PopCPURegList(used_callee_saved_registers);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2670,16 +2666,32 @@
}
-static void GenerateRecordCallTarget(MacroAssembler* masm,
- Register argc,
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ Register argc, Register function,
+ Register feedback_vector, Register index,
+ Register new_target) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Number-of-arguments register must be smi-tagged to call out.
+ __ SmiTag(argc);
+ __ Push(argc, function, feedback_vector, index);
+
+ DCHECK(feedback_vector.Is(x2) && index.Is(x3));
+ __ CallStub(stub);
+
+ __ Pop(index, feedback_vector, function, argc);
+ __ SmiUntag(argc);
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
- Register feedback_vector,
- Register index,
- Register scratch1,
- Register scratch2) {
+ Register feedback_vector, Register index,
+ Register new_target, Register scratch1,
+ Register scratch2, Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
- DCHECK(!AreAliased(scratch1, scratch2,
- argc, function, feedback_vector, index));
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
+ feedback_vector, index, new_target));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
@@ -2694,29 +2706,44 @@
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
- __ Add(scratch1, feedback_vector,
+ Register feedback = scratch1;
+ Register feedback_map = scratch2;
+ Register feedback_value = scratch3;
+ __ Add(feedback, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Cmp(scratch1, function);
+ // We don't know if feedback value is a WeakCell or a Symbol, but it's
+ // harmless to read at this position in a symbol (see static asserts in
+ // type-feedback-vector.h).
+ Label check_allocation_site;
+ __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
+ __ Cmp(function, feedback_value);
__ B(eq, &done);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ B(eq, &done);
+ __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
+ __ B(ne, &check_allocation_site);
- if (!FLAG_pretenuring_call_new) {
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite. Do a map check on the object in scratch1 register.
- __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
- __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
+ // If the weak cell is cleared, we have a new chance to become monomorphic.
+ __ JumpIfSmi(feedback_value, &initialize);
+ __ B(&megamorphic);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
- __ Cmp(function, scratch1);
- __ B(ne, &megamorphic);
- __ B(&done);
- }
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &megamorphic);
+ __ B(&done);
__ Bind(&miss);
@@ -2736,289 +2763,105 @@
// indicate the ElementsKind if function is the Array constructor.
__ Bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
- __ Cmp(function, scratch1);
- __ B(ne, &not_array_function);
+ // Make sure the function is the Array() function
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateAllocationSiteStub create_stub(masm->isolate());
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &create_stub, argc, function,
+ feedback_vector, index, new_target);
+ __ B(&done);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(argc);
- __ Push(argc, function, feedback_vector, index);
-
- // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
- // index in x3.
- DCHECK(feedback_vector.Is(x2) && index.Is(x3));
- __ CallStub(&create_stub);
-
- __ Pop(index, feedback_vector, function, argc);
- __ SmiUntag(argc);
- }
- __ B(&done);
-
- __ Bind(&not_array_function);
- }
-
- // An uninitialized cache is patched with the function.
-
- __ Add(scratch1, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(function, MemOperand(scratch1, 0));
-
- __ Push(function);
- __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Pop(function);
-
+ __ Bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
+ feedback_vector, index, new_target);
__ Bind(&done);
}
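Editor's note: the feedback slot now moves through a small state machine — the uninitialized sentinel becomes either an AllocationSite (for the Array constructor) or a WeakCell holding the target, a cleared WeakCell may become monomorphic again, and any target mismatch goes megamorphic. A simplified transition function, with the states as plain enumerators rather than heap objects:

```cpp
// Simplified model -- real slots hold heap objects (symbols, WeakCells,
// AllocationSites), not enum values.
enum class FeedbackSlot {
  Uninitialized,    // uninitialized_symbol sentinel
  MonomorphicCell,  // WeakCell pointing at the cached JSFunction
  ClearedCell,      // WeakCell whose value was collected
  AllocationSite,   // Array-constructor fast path
  Megamorphic       // megamorphic_symbol sentinel
};

FeedbackSlot RecordCallTarget(FeedbackSlot slot, bool same_target,
                              bool target_is_array_function) {
  switch (slot) {
    case FeedbackSlot::Uninitialized:
    case FeedbackSlot::ClearedCell:  // cleared cells get a second chance
      return target_is_array_function ? FeedbackSlot::AllocationSite
                                      : FeedbackSlot::MonomorphicCell;
    case FeedbackSlot::MonomorphicCell:
    case FeedbackSlot::AllocationSite:
      return same_target ? slot : FeedbackSlot::Megamorphic;
    case FeedbackSlot::Megamorphic:
      return FeedbackSlot::Megamorphic;
  }
  return slot;  // unreachable
}
```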
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
-
- // Do not transform the receiver for native (Compilerhints already in x3).
- __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm,
- int argc,
- Register function,
- Register type,
- Label* non_function) {
- // Check for function proxy.
- // x10 : function type.
- __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
- __ Push(function); // put proxy as additional argument
- __ Mov(x0, argc + 1);
- __ Mov(x2, 0);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ Bind(non_function);
- __ Poke(function, argc * kXRegSize);
- __ Mov(x0, argc); // Set up the number of arguments.
- __ Mov(x2, 0);
- __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Pop(x1);
- }
- __ Poke(x0, argc * kPointerSize);
- __ B(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // x1 function the function to call
- Register function = x1;
- Register type = x4;
- Label slow, non_function, wrap, cont;
-
- // TODO(jbramley): This function has a lot of unnamed registers. Name them,
- // and tidy things up a bit.
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &non_function);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
- }
-
- // Fast-case: Invoke the function now.
- // x1 function pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ Peek(x3, argc * kPointerSize);
-
- if (needs_checks) {
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
- } else {
- __ B(&wrap);
- }
-
- __ Bind(&cont);
- }
-
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ Bind(&slow);
- EmitSlowCase(masm, argc, function, type, &non_function);
- }
-
- if (call_as_method) {
- __ Bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallFunctionStub::Generate");
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+ // x3 : slot in feedback vector (Smi, for RecordCallTarget)
Register function = x1;
- Label slow, non_function_call;
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(function, &non_function_call);
+ __ JumpIfSmi(function, &non_function);
// Check that the function is a JSFunction.
Register object_type = x10;
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
- &slow);
+ &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
- __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into x2.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by x3 + 1.
- __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into x2, or undefined.
- __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
- __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
- __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
- &feedback_register_initialized);
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(x2, x5);
- }
+ __ AssertUndefinedOrAllocationSite(x2, x5);
- // Jump to the function-specific construct stub.
- Register jump_reg = x4;
- Register shared_func_info = jump_reg;
- Register cons_stub = jump_reg;
- Register cons_stub_code = jump_reg;
- __ Ldr(shared_func_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(cons_stub,
- FieldMemOperand(shared_func_info,
- SharedFunctionInfo::kConstructStubOffset));
- __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
- __ Br(cons_stub_code);
+ __ Mov(x3, function);
- Label do_call;
- __ Bind(&slow);
- __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
- __ B(ne, &non_function_call);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ B(&do_call);
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
+ __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x4);
- __ Bind(&non_function_call);
- __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-
- __ Bind(&do_call);
- // Set expected number of arguments to zero (not changing x0).
- __ Mov(x2, 0);
- __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Bind(&non_function);
+ __ Mov(x3, function);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(vector, FieldMemOperand(vector,
- JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(vector, FieldMemOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
-}
-
-
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// x1 - function
// x3 - slot id
- Label miss;
+ // x2 - vector
+ // x4 - allocation site (loaded from vector[slot])
Register function = x1;
Register feedback_vector = x2;
Register index = x3;
- Register scratch = x4;
+ Register allocation_site = x4;
+ Register scratch = x5;
- EmitLoadTypeFeedbackVector(masm, feedback_vector);
-
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
__ Cmp(function, scratch);
- __ B(ne, &miss);
+ __ B(ne, miss);
__ Mov(x0, Operand(arg_count()));
- __ Add(scratch, feedback_vector,
+ // Increment the call count for monomorphic function calls.
+ __ Add(feedback_vector, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Add(feedback_vector, feedback_vector,
+ Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ Ldr(index, FieldMemOperand(feedback_vector, 0));
+ __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Str(index, FieldMemOperand(feedback_vector, 0));
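+ // The call count is kept as a Smi in the vector slot immediately after
+ // the feedback element, hence the extra kPointerSize above.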
- // Verify that scratch contains an AllocationSite
- Register map = x5;
- __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
-
- Register allocation_site = feedback_vector;
- __ Mov(allocation_site, scratch);
+ // Set up arguments for the array constructor stub.
+ Register allocation_site_arg = feedback_vector;
+ Register new_target_arg = index;
+ __ Mov(allocation_site_arg, allocation_site);
+ __ Mov(new_target_arg, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- __ Unreachable();
}
@@ -3027,61 +2870,67 @@
// x1 - function
// x3 - slot id (Smi)
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
- Label have_js_function;
+ // x2 - vector
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
Register function = x1;
Register feedback_vector = x2;
Register index = x3;
- Register type = x4;
-
- EmitLoadTypeFeedbackVector(masm, feedback_vector);
// The checks. First, does x1 match the recorded monomorphic target?
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
- __ Cmp(x4, function);
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
+ __ Cmp(x5, function);
__ B(ne, &extra_checks_or_miss);
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(function, &extra_checks_or_miss);
- // Compute the receiver in sloppy mode.
- __ Peek(x3, argc * kPointerSize);
+ // Increment the call count for monomorphic function calls.
+ __ Add(feedback_vector, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(feedback_vector, feedback_vector,
+ Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ Ldr(index, FieldMemOperand(feedback_vector, 0));
+ __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Str(index, FieldMemOperand(feedback_vector, 0));
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
-
- __ Bind(&cont);
- }
-
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc, function, type, &non_function);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ Bind(&call_function);
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
- __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
+ __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
+
+ __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, ¬_allocation_site);
+
+ HandleArrayCase(masm, &miss);
+
+ __ bind(¬_allocation_site);
// The following cases attempt to handle MISS cases without going to the
// runtime.
@@ -3099,14 +2948,11 @@
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Subs(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ B(&slow_start);
+
+ __ Bind(&call);
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -3118,71 +2964,59 @@
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
__ Cmp(function, x5);
__ B(eq, &miss);
- // Update stats.
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
+ __ Ldr(x5, NativeContextMemOperand());
+ __ Cmp(x4, x5);
+ __ B(ne, &miss);
- // Store the function.
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Str(function, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ // Initialize the call counter.
+ __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ Adds(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(function, MemOperand(x4, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // x2 - vector
+ // x3 - slot
+ // x1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(function);
+ __ CallStub(&create_stub);
+ __ Pop(function);
+ }
- // Update the write barrier.
- __ Mov(x5, function);
- __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ B(&have_js_function);
+ __ B(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &non_function);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
- __ B(&have_js_function);
+ __ B(&call);
}
void CallICStub::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("CallICStub[Miss]");
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ Peek(x4, (arg_count() + 1) * kPointerSize);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push the function, the feedback vector and the slot.
+ __ Push(x1, x2, x3);
- // Push the receiver and the function and feedback info.
- __ Push(x4, x1, x2, x3);
+ // Call the entry.
+ __ CallRuntime(Runtime::kCallIC_Miss);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
-
- // Move result to edi and exit the internal frame.
- __ Mov(x1, x0);
- }
+ // Move result to x1 and exit the internal frame.
+ __ Mov(x1, x0);
}
@@ -3221,7 +3055,7 @@
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -3229,19 +3063,29 @@
// If index is a heap number, try converting it to an integer.
__ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
- // Save object_ on the stack and pass index_ as argument for runtime call.
- __ Push(object_, index_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
+ } else {
+ // Save object_ on the stack and pass index_ as argument for runtime call.
+ __ Push(object_, index_);
+ }
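+ // When this code runs as part of an IC handler, the vector and slot
+ // registers are still live, so they are spilled around the runtime call
+ // and restored below.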
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
- __ Pop(object_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister());
+ } else {
+ __ Pop(object_);
+ }
// Reload the instance type.
__ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@@ -3259,7 +3103,7 @@
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3290,7 +3134,7 @@
__ Bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3299,6 +3143,32 @@
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ // Inputs are in x0 (lhs) and x1 (rhs).
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ ASM_LOCATION("CompareICStub[Booleans]");
+ Label miss;
+
+ __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
+ __ AssertSmi(x1);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
+ __ AssertSmi(x0);
+ }
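+ // For ordering comparisons both operands were replaced by their Smi
+ // to_number values (0 or 1), so the subtraction below produces a correctly
+ // signed result; for equality the oddball pointers themselves suffice.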
+ __ Sub(x0, x1, x0);
+ __ Ret();
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
DCHECK(state() == CompareICState::SMI);
@@ -3366,7 +3236,7 @@
__ Ret();
__ Bind(&unordered);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3536,9 +3406,9 @@
__ Bind(&runtime);
__ Push(lhs, rhs);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ Bind(&miss);
@@ -3546,9 +3416,9 @@
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
- ASM_LOCATION("CompareICStub[Objects]");
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
+ ASM_LOCATION("CompareICStub[Receivers]");
Label miss;
@@ -3558,10 +3428,11 @@
__ JumpIfEitherSmi(rhs, lhs, &miss);
- __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
- __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
+ __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Sub(result, rhs, lhs);
__ Ret();
@@ -3570,10 +3441,11 @@
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("CompareICStub[KnownObjects]");
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[KnownReceivers]");
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
Register result = x0;
Register rhs = x0;
@@ -3583,15 +3455,30 @@
Register rhs_map = x10;
Register lhs_map = x11;
+ Register map = x12;
+ __ GetWeakValue(map, cell);
__ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
__ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Cmp(rhs_map, Operand(known_map_));
+ __ Cmp(rhs_map, map);
__ B(ne, &miss);
- __ Cmp(lhs_map, Operand(known_map_));
+ __ Cmp(lhs_map, map);
__ B(ne, &miss);
+ if (Token::IsEqualityOp(op())) {
__ Sub(result, rhs, lhs);
__ Ret();
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
+ } else {
+ Register ncr = x2;
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ Mov(ncr, Smi::FromInt(GREATER));
+ } else {
+ __ Mov(ncr, Smi::FromInt(LESS));
+ }
+ __ Push(lhs, rhs, ncr);
+ __ TailCallRuntime(Runtime::kCompare);
+ }
__ Bind(&miss);
GenerateMiss(masm);
@@ -3607,9 +3494,6 @@
Register stub_entry = x11;
{
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameScope scope(masm, StackFrame::INTERNAL);
Register op = x10;
Register left = x1;
@@ -3621,7 +3505,7 @@
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@@ -3867,7 +3751,7 @@
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// x1: result_length
@@ -3913,7 +3797,7 @@
__ Ret();
__ Bind(&slow_string);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ Bind(¬_string);
Label not_oddball;
@@ -3924,7 +3808,54 @@
__ Bind(¬_oddball);
__ Push(x0); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in x0.
+ Label not_smi;
+ __ JumpIfNotSmi(x0, ¬_smi);
+ STATIC_ASSERT(kSmiTag == 0);
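+ // ToLength clamps negative inputs to zero; non-negative Smis pass through
+ // unchanged, since every Smi is below the 2^53 - 1 ceiling.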
+ __ Tst(x0, x0);
+ __ Csel(x0, x0, Operand(0), ge);
+ __ Ret();
+ __ Bind(¬_smi);
+
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in x0.
+ Label is_number;
+ __ JumpIfSmi(x0, &is_number);
+
+ Label not_string;
+ __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, ¬_string, hs);
+ // x0: receiver
+ // x1: receiver instance type
+ __ Ret();
+ __ Bind(¬_string);
+
+ Label not_heap_number;
+ __ Cmp(x1, HEAP_NUMBER_TYPE);
+ __ B(ne, ¬_heap_number);
+ __ Bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ Bind(¬_heap_number);
+
+ Label not_oddball;
+ __ Cmp(x1, ODDBALL_TYPE);
+ __ B(ne, ¬_oddball);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
+ __ Ret();
+ __ Bind(¬_oddball);
+
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -4036,46 +3967,39 @@
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[8]: left string
- Register right = x10;
- Register left = x11;
- Register result = x0;
- __ Pop(right, left);
+ // ----------- S t a t e -------------
+ // -- x1 : left
+ // -- x0 : right
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertString(x1);
+ __ AssertString(x0);
Label not_same;
- __ Subs(result, right, left);
+ __ Cmp(x0, x1);
__ B(ne, ¬_same);
- STATIC_ASSERT(EQUAL == 0);
- __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ __ Mov(x0, Smi::FromInt(EQUAL));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
+ x4);
__ Ret();
__ Bind(¬_same);
// Check that both objects are sequential one-byte strings.
- __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
+ Label runtime;
+ __ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);
- // Compare flat one-byte strings natively. Remove arguments from stack first,
- // as this function will generate a return.
- __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
- x14, x15);
-
- __ Bind(&runtime);
-
- // Push arguments back on to the stack.
- // sp[0] = right string
- // sp[8] = left string.
- __ Push(left, right);
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
+ x4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
+ x15);
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ Bind(&runtime);
+ __ Push(x1, x0);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -4217,12 +4141,12 @@
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(val,
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- regs_.scratch2(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(val,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm); // Restore the extra scratch registers we used.
@@ -4272,76 +4196,6 @@
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // x0 value element value to store
- // x3 index_smi element index as smi
- // sp[0] array_index_smi array literal index in function as smi
- // sp[1] array array literal
-
- Register value = x0;
- Register index_smi = x3;
-
- Register array = x1;
- Register array_map = x2;
- Register array_index_smi = x4;
- __ PeekPair(array_index_smi, array, 0);
- __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
-
- Label double_elements, smi_element, fast_elements, slow_elements;
- Register bitfield2 = x10;
- __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
-
- // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
- // FAST_HOLEY_ELEMENTS.
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
- __ B(hi, &double_elements);
-
- __ JumpIfSmi(value, &smi_element);
-
- // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
- __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
- &fast_elements);
-
- // Store into the array literal requires an elements transition. Call into
- // the runtime.
- __ Bind(&slow_elements);
- __ Push(array, index_smi, value);
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- __ Push(x11, array_index_smi);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ Bind(&fast_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(value, MemOperand(x11));
- // Update the write barrier for the array store.
- __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ Bind(&smi_element);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Ret();
-
- __ Bind(&double_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
- &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4359,19 +4213,437 @@
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(x2);
+ CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
+
+
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register cached_map = scratch1;
+
+ __ Ldr(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Cmp(receiver_map, cached_map);
+ __ B(ne, &start_polymorphic);
+ // found, now call handler.
+ Register handler = feedback;
+ __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(feedback);
+
+ Register length = scratch2;
+ __ Bind(&start_polymorphic);
+ __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ __ Cmp(length, Operand(Smi::FromInt(2)));
+ __ B(eq, miss);
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+ // +-----+------+------+-----+-----+ ... ----+
+ // | map | len  | wm0  | h0  | wm1 |      hN |
+ // +-----+------+------+-----+-----+ ... ----+
+ //                 0      1     2        len-1
+ //                              ^              ^
+ //                              |              |
+ //                         pointer_reg      too_far
+ //                         aka feedback     scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ Add(too_far, feedback,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(pointer_reg, feedback,
+ FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
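+ // Start the loop at element 2: the pair at elements 0 and 1 was already
+ // checked before branching to &start_polymorphic.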
+
+ __ Bind(&next_loop);
+ __ Ldr(cached_map, MemOperand(pointer_reg));
+ __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Cmp(receiver_map, cached_map);
+ __ B(ne, &prepare_next);
+ __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(handler);
+
+ __ Bind(&prepare_next);
+ __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
+ __ Cmp(pointer_reg, too_far);
+ __ B(lt, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+}
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register receiver_map, Register feedback,
+ Register vector, Register slot,
+ Register scratch, Label* compare_map,
+ Label* load_smi_map, Label* try_array) {
+ __ JumpIfSmi(receiver, load_smi_map);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(compare_map);
+ Register cached_map = scratch;
+ // Move the weak map into the weak_cell register.
+ __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
+ __ Cmp(cached_map, receiver_map);
+ __ B(ne, try_array);
+
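+ // The monomorphic handler is the Code object in the slot after the weak
+ // cell (vector[slot + 1]); jump past the Code header to its entry point.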
+ Register handler = feedback;
+ __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(handler);
+}
+
+
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
+ Register name = LoadWithVectorDescriptor::NameRegister(); // x2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
+ Register feedback = x4;
+ Register receiver_map = x5;
+ Register scratch1 = x6;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ Bind(&try_array);
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
+
+ __ Bind(¬_array);
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ receiver, name, feedback,
+ receiver_map, scratch1, x7);
+
+ __ Bind(&miss);
+ LoadIC::GenerateMiss(masm);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
+ Register key = LoadWithVectorDescriptor::NameRegister(); // x2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
+ Register feedback = x4;
+ Register receiver_map = x5;
+ Register scratch1 = x6;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ Bind(&try_array);
+ // Is it a fixed array?
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ Bind(&polymorphic);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
+
+ __ Bind(¬_array);
+ // Is it generic?
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
+ &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ Bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Cmp(key, feedback);
+ __ B(ne, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
+
+ __ Bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
+ Register key = VectorStoreICDescriptor::NameRegister(); // x2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
+ Register feedback = x5;
+ Register receiver_map = x6;
+ Register scratch1 = x7;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ Bind(&try_array);
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);
+
+ __ Bind(¬_array);
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+ receiver, key, feedback,
+ receiver_map, scratch1, x8);
+
+ __ Bind(&miss);
+ StoreIC::GenerateMiss(masm);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+
+ __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ //                 0      1     2              len-1
+ //                                 ^              ^
+ //                                 |              |
+ //                             pointer_reg    too_far
+ //                             aka feedback   scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ Add(too_far, feedback,
+ Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
+ __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(pointer_reg, feedback,
+ FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
+
+ __ Bind(&next_loop);
+ __ Ldr(cached_map, MemOperand(pointer_reg));
+ __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Cmp(receiver_map, cached_map);
+ __ B(ne, &prepare_next);
+ // Is it a transitioning store?
+ __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+ __ B(ne, &transition_call);
+
+ __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(pointer_reg);
+
+ __ Bind(&transition_call);
+ __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
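+ // A cleared weak cell reads as Smi(0), meaning the transition map has been
+ // collected; bail out to the miss handler in that case.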
+
+ __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(feedback, too_far);
+ __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(receiver_map);
+
+ __ Bind(&prepare_next);
+ __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
+ __ Cmp(pointer_reg, too_far);
+ __ B(lt, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
+ Register key = VectorStoreICDescriptor::NameRegister(); // x2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
+ Register feedback = x5;
+ Register receiver_map = x6;
+ Register scratch1 = x7;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ Bind(&try_array);
+ // Is it a fixed array?
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array);
+
+ // We have a polymorphic element handler.
+ Label try_poly_name;
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
+
+ __ Bind(¬_array);
+ // Is it generic?
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
+ &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ Bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Cmp(key, feedback);
+ __ B(ne, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
+
+ __ Bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -4513,7 +4785,7 @@
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
@@ -4582,7 +4854,7 @@
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@@ -4673,7 +4945,7 @@
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@@ -4911,14 +5183,15 @@
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("ArrayConstructorStub::Generate");
// ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count() == ANY)
+ // -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
- // -- sp[0] : return address
- // -- sp[4] : last argument
+ // -- x3 : new target
+ // -- sp[0] : last argument
// -----------------------------------
Register constructor = x1;
Register allocation_site = x2;
+ Register new_target = x3;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
@@ -4940,6 +5213,13 @@
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
}
+ // Enter the context of the Array function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ Label subclassing;
+ __ Cmp(new_target, constructor);
+ __ B(ne, &subclassing);
+
Register kind = x3;
Label no_info;
// Get the elements kind and case on that.
@@ -4953,6 +5233,26 @@
__ Bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing support.
+ __ Bind(&subclassing);
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x0, x0, Operand(3));
+ break;
+ case NONE:
+ __ Poke(constructor, 0 * kPointerSize);
+ __ Mov(x0, Operand(3));
+ break;
+ case ONE:
+ __ Poke(constructor, 1 * kPointerSize);
+ __ Mov(x0, Operand(4));
+ break;
+ }
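+ // In each case the constructor overwrites the receiver slot, and x0 ends up
+ // as the JS argument count plus three, accounting for the constructor, new
+ // target and allocation site handed to Runtime::kNewArray.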
+ __ Push(new_target, allocation_site);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5043,12 +5343,330 @@
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = x0;
+ Register slot = x2;
+ Label slow_case;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
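+ // depth() is a constant encoded in the stub key, so this loop unrolls into
+ // straight-line context loads at code generation time.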
+
+ // Load the PropertyCell value at the specified slot.
+ __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ Ldr(result, ContextMemOperand(result));
+ __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
+ __ Ret();
+
+ // Fallback to runtime.
+ __ Bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register value = x0;
+ Register slot = x2;
+ Register context_temp = x10;
+ Register cell = x10;
+ Register cell_details = x11;
+ Register cell_value = x12;
+ Register cell_value_map = x13;
+ Register value_map = x14;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ Ldr(cell, ContextMemOperand(cell));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ Ldr(cell_details,
+ UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ And(cell_details, cell_details,
+ PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask);
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(ne, ¬_mutable_data);
+ __ JumpIfSmi(value, &fast_smi_case);
+ __ Bind(&fast_heapobject_case);
+ __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ Mov(x11, value);
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ Bind(¬_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Cmp(cell_value, value);
+ __ B(ne, ¬_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
+ __ B(ne, &slow_case);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(eq, &done);
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(eq, &done);
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData));
+ __ Check(eq, kUnexpectedValue);
+ __ Bind(&done);
+ }
+ __ Ret();
+ __ Bind(¬_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(ne, &slow_case);
+
+ // Now either both old and new values must be smis or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ Bind(&fast_smi_case);
+ __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Ret();
+
+ __ Bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
+ __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Cmp(cell_value_map, value_map);
+ __ B(eq, &fast_heapobject_case);
+
+ // Fall back to the runtime.
+ __ Bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy);
+}
+
+
+// The number of registers that CallApiFunctionAndReturn will need to save on
+// the stack. The space for these registers needs to be allocated in the
+// ExitFrame before calling CallApiFunctionAndReturn.
+static const int kCallApiFunctionSpillSpace = 4;
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return static_cast<int>(ref0.address() - ref1.address());
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions.
+// 'stack_space' is the space to be unwound on exit (includes the call JS
+// arguments space and the additional space allocated for the fast call).
+// 'spill_offset' is the offset from the stack pointer where
+// CallApiFunctionAndReturn can spill registers.
+static void CallApiFunctionAndReturn(
+ MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand, int spill_offset,
+ MemOperand return_value_operand, MemOperand* context_restore_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
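+ // next, limit and level are adjacent fields of the isolate's
+ // HandleScopeData, so limit and level can be addressed as fixed offsets
+ // from next_address.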
+
+ DCHECK(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Mov(x10, ExternalReference::is_profiling_address(isolate));
+ __ Ldrb(w10, MemOperand(x10));
+ __ Cbz(w10, &profiler_disabled);
+ __ Mov(x3, thunk_ref);
+ __ B(&end_profiler_check);
+
+ __ Bind(&profiler_disabled);
+ __ Mov(x3, function_address);
+ __ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ __ Poke(x19, (spill_offset + 0) * kXRegSize);
+ __ Poke(x20, (spill_offset + 1) * kXRegSize);
+ __ Poke(x21, (spill_offset + 2) * kXRegSize);
+ __ Poke(x22, (spill_offset + 3) * kXRegSize);
+
+ // Allocate HandleScope in callee-save registers.
+ // We will need to restore the HandleScope after the call to the API
+ // function; by allocating it in callee-save registers it will be preserved
+ // by C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ __ Mov(handle_scope_base, next_address);
+ __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ __ Add(level_reg, level_reg, 1);
+ __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
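+ // Entering the scope only bumps the level; the saved next and limit values
+ // are compared and restored after the API call returns.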
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, x3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ldr(x0, return_value_operand);
+ __ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ __ Cmp(w1, level_reg);
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ Sub(level_reg, level_reg, 1);
+ __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ __ Cmp(limit_reg, x1);
+ __ B(ne, &delete_allocated_handles);
+
+ // Leave the API exit frame.
+ __ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ __ Peek(x19, (spill_offset + 0) * kXRegSize);
+ __ Peek(x20, (spill_offset + 1) * kXRegSize);
+ __ Peek(x21, (spill_offset + 2) * kXRegSize);
+ __ Peek(x22, (spill_offset + 3) * kXRegSize);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ Ldr(cp, *context_restore_operand);
+ }
+
+ if (stack_space_operand != NULL) {
+ __ Ldr(w2, *stack_space_operand);
+ }
+
+ __ LeaveExitFrame(false, x1, !restore_context);
+
+ // Check if the function scheduled an exception.
+ __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
+ __ Ldr(x5, MemOperand(x5));
+ __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
+ &promote_scheduled_exception);
+
+ if (stack_space_operand != NULL) {
+ __ Drop(x2, 1);
+ } else {
+ __ Drop(stack_space);
+ }
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ Bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ Bind(&delete_allocated_handles);
+ __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ __ Mov(saved_result, x0);
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ Mov(x0, saved_result);
+ __ B(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
+ // -- x3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5063,10 +5681,6 @@
Register api_function_address = x1;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
@@ -5078,6 +5692,8 @@
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || x3.is(argc.reg()));
+
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
@@ -5088,7 +5704,7 @@
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
}
Register isolate_reg = x5;
- __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
+ __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
// FunctionCallbackArguments:
// return value, return value default, isolate, holder.
@@ -5113,37 +5729,70 @@
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call = 0
- __ Mov(x10, argc);
- __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc.immediate());
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+ } else {
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
+ __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call
+ __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
+ __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
+ __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
+ }
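+ // In the register-argc case, the is_construct_call field written above
+ // doubles as the number of bytes to unwind from the stack on return (read
+ // back through stack_space_operand).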
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first JS argument.
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ MemOperand is_construct_call_operand =
+ MemOperand(masm->StackPointer(), 4 * kPointerSize);
+ MemOperand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+ }
const int spill_offset = 1 + kApiStackSpace;
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- spill_offset,
- return_value_operand,
- &context_restore_operand);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_operand, spill_offset,
+ return_value_operand, &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
+ call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -5181,17 +5830,15 @@
ExternalReference::invoke_accessor_getter_callback(isolate());
const int spill_offset = 1 + kApiStackSpace;
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- spill_offset,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, NULL, spill_offset,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64