Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE
This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile properly so it doesn't break that
build.
FPIIM-449
Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h
index b384d3f..42e2208 100644
--- a/src/ppc/assembler-ppc-inl.h
+++ b/src/ppc/assembler-ppc-inl.h
@@ -202,8 +202,8 @@
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -248,9 +248,8 @@
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
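// Illustrative sketch (hypothetical names, not V8's actual API): the switch
// from RecordWrite to RecordWriteIntoCode reflects that the updated "slot"
// lives inside a Code object's instruction stream, so the incremental marker
// must be handed the RelocInfo rather than a raw slot address:
//
//   void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, HeapObject* value) {
//     if (IsMarking() && IsWhite(value)) {
//       RecordCodeSlot(host, rinfo);  // re-found via relocation, not via &slot
//     }
//   }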
@@ -329,39 +328,6 @@
}
-bool RelocInfo::IsPatchedReturnSequence() {
- //
- // The patched return sequence is defined by
- // BreakLocation::SetDebugBreakAtReturn()
- // FIXED_SEQUENCE
-
- Instr instr0 = Assembler::instr_at(pc_);
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
-#if V8_TARGET_ARCH_PPC64
- Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
- Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
- Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
-#else
- Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
-#endif
- bool patched_return =
- ((instr0 & kOpcodeMask) == ADDIS && (instr1 & kOpcodeMask) == ORI &&
-#if V8_TARGET_ARCH_PPC64
- (instr3 & kOpcodeMask) == ORIS && (instr4 & kOpcodeMask) == ORI &&
-#endif
- (binstr == 0x7d821008)); // twge r2, r2
-
- // printf("IsPatchedReturnSequence: %d\n", patched_return);
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc
index 147fb59..aed149b 100644
--- a/src/ppc/assembler-ppc.cc
+++ b/src/ppc/assembler-ppc.cc
@@ -55,7 +55,7 @@
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
- cache_line_size_ = 128;
+ icache_line_size_ = 128;
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
@@ -85,6 +85,9 @@
// Assume support
supported_ |= (1u << FPU);
}
+ if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
+ icache_line_size_ = cpu.icache_line_size();
+ }
#elif V8_OS_AIX
// Assume FP support and default cache line size
supported_ |= (1u << FPU);
@@ -1504,14 +1507,14 @@
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
-#if ABI_USES_FUNCTION_DESCRIPTORS
- Label instructions;
- DCHECK(pc_offset() == 0);
- emit_label_addr(&instructions);
- dp(0);
- dp(0);
- bind(&instructions);
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ Label instructions;
+ DCHECK(pc_offset() == 0);
+ emit_label_addr(&instructions);
+ dp(0);
+ dp(0);
+ bind(&instructions);
+ }
}
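// The #if block above becomes an ordinary if because
// ABI_USES_FUNCTION_DESCRIPTORS is now always defined as 0 or 1 (see
// assembler-ppc.h below); the dead branch is still parsed and type-checked,
// then folded away. A minimal sketch of the pattern (macro value assumed for
// the example):
//
//   #define ABI_USES_FUNCTION_DESCRIPTORS 0
//   void Prologue() {
//     if (ABI_USES_FUNCTION_DESCRIPTORS) {  // constant-folded to if (false)
//       // descriptor words would be emitted here
//     }
//   }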
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index e84d695..58c6c94 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -46,15 +46,24 @@
#include "src/assembler.h"
#include "src/ppc/constants-ppc.h"
-#define ABI_USES_FUNCTION_DESCRIPTORS \
- (V8_HOST_ARCH_PPC && (V8_OS_AIX || \
- (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN)))
+#if V8_HOST_ARCH_PPC && \
+ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define ABI_USES_FUNCTION_DESCRIPTORS 1
+#else
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+#endif
-#define ABI_PASSES_HANDLES_IN_REGS \
- (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_PASSES_HANDLES_IN_REGS 1
+#else
+#define ABI_PASSES_HANDLES_IN_REGS 0
+#endif
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
- (!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
+#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
+#else
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
+#endif
#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
#define ABI_CALL_VIA_IP 1
@@ -63,9 +72,9 @@
#endif
#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER Register::kCode_r2
+#define ABI_TOC_REGISTER 2
#else
-#define ABI_TOC_REGISTER Register::kCode_r13
+#define ABI_TOC_REGISTER 13
#endif
#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
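// With ABI_TOC_REGISTER now a plain register code (2 or 13) instead of a
// Register::kCode_* enumerator, it can be used anywhere an int code fits and
// converted when a Register is needed, e.g. (as in code-stubs-ppc.cc below):
//
//   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));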
@@ -247,7 +256,7 @@
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
@@ -273,14 +282,9 @@
const CRegister cr5 = {5};
const CRegister cr6 = {6};
const CRegister cr7 = {7};
-const CRegister cr8 = {8};
-const CRegister cr9 = {9};
-const CRegister cr10 = {10};
-const CRegister cr11 = {11};
-const CRegister cr12 = {12};
-const CRegister cr13 = {13};
-const CRegister cr14 = {14};
-const CRegister cr15 = {15};
+
+// TODO(ppc) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -1203,7 +1207,7 @@
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc
index 0476cd2..f0b76cc 100644
--- a/src/ppc/builtins-ppc.cc
+++ b/src/ppc/builtins-ppc.cc
@@ -136,6 +136,107 @@
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- lr : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in r4 and the double value in d1.
+ __ LoadRoot(r4, root_index);
+ __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+
+ // Set up state for the loop
+ // r5: address of arg[0] + kPointerSize
+ // r6: number of slots to drop at exit (arguments + receiver)
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ add(r5, sp, r5);
+ __ addi(r6, r3, Operand(1));
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ cmpl(r5, sp);
+ __ ble(&done_loop);
+
+ // Load the next parameter tagged value into r3.
+ __ LoadPU(r3, MemOperand(r5, -kPointerSize));
+
+ // Load the double value of the parameter into d2, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(r3, &convert_smi);
+ __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ JumpIfRoot(r7, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r6);
+ __ Push(r4, r5, r6);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Pop(r4, r5, r6);
+ __ SmiUntag(r6);
+ {
+ // Restore the double accumulator value (d1).
+ Label done_restore;
+ __ SmiToDouble(d1, r4);
+ __ JumpIfSmi(r4, &done_restore);
+ __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ bind(&done_restore);
+ }
+ }
+ __ b(&convert);
+ __ bind(&convert_number);
+ __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ b(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDouble(d2, r3);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (d1) and the next parameter value on the right hand side (d2).
+ Label compare_nan, compare_swap;
+ __ fcmpu(d1, d2);
+ __ bunordered(&compare_nan);
+ __ b(cond_done, &loop);
+ __ b(CommuteCondition(cond_done), &compare_swap);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ TestDoubleIsMinusZero(reg, r7, r8);
+ __ bne(&loop);
+
+ // Update accumulator. Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ fmr(d1, d2);
+ __ mr(r4, r3);
+ __ b(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ // We still need to visit the rest of the arguments.
+ __ bind(&compare_nan);
+ __ LoadRoot(r4, Heap::kNanValueRootIndex);
+ __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ b(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ mr(r3, r4);
+ __ Drop(r6);
+ __ Ret();
+}
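// The accumulator loop above implements Math.max/Math.min semantics: NaN is
// sticky but all remaining arguments are still converted, and for equal
// operands -0 vs +0 is disambiguated (max(+0, -0) is +0, min(+0, -0) is -0).
// A self-contained C++ sketch of the max case (illustration only):
//
//   #include <cmath>
//   double MathMax(const double* args, int argc) {
//     double acc = -INFINITY;  // default result for Math.max()
//     for (int i = 0; i < argc; i++) {
//       double v = args[i];
//       if (std::isnan(v) || std::isnan(acc)) { acc = NAN; continue; }
//       if (v > acc || (v == acc && !std::signbit(v))) acc = v;  // prefer +0
//     }
//     return acc;
//   }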
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -230,8 +331,9 @@
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r5, r4, r6); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(r5); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(r5);
}
__ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
@@ -359,8 +461,9 @@
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r5, r4, r6); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(r5); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(r5);
}
__ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
@@ -368,24 +471,6 @@
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- r4 : target function (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -----------------------------------
-
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- // Push function as parameter to the runtime call.
- __ Push(r4, r6, r4);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(r4, r6);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
@@ -393,9 +478,29 @@
__ JumpToJSEntry(ip);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(r3);
+ __ Push(r3, r4, r6, r4);
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ addi(ip, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallRuntime(function_id, 1);
+ __ mr(r5, r3);
+
+ // Restore target function and new target.
+ __ Pop(r3, r4, r6);
+ __ SmiUntag(r3);
+ }
+ __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
@@ -411,8 +516,7 @@
__ cmpl(sp, ip);
__ bge(&ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -421,7 +525,8 @@
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -448,142 +553,18 @@
__ SmiTag(r3);
__ Push(r5, r3);
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
- __ bne(&rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r6: new target
- __ LoadP(r5,
- FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r5, &rt_call);
- __ CompareObjectType(r5, r8, r7, MAP_TYPE);
- __ bne(&rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
- __ cmp(r4, r8);
- __ bne(&rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r4: constructor function
- // r5: initial map
- // r6: new target
- __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
- __ beq(&rt_call);
-
- // Now allocate the JSObject on the heap.
- // r4: constructor function
- // r5: initial map
- // r6: new target
- __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-
- __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r4: constructor function
- // r5: initial map
- // r6: new target
- // r7: JSObject (not HeapObject tagged - the actual address).
- // r10: start of next object
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
- __ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
- __ StoreP(r9, MemOperand(r7, JSObject::kElementsOffset));
- __ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ addi(r7, r7, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with the appropriate filler.
- // r7: JSObject (tagged)
- // r8: First in-object property of JSObject (not tagged)
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwz(r3, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(r11, r3);
- // r11: slack tracking counter
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&no_inobject_slack_tracking);
- // Decrease generous allocation count.
- __ Add(r3, r3, -(1 << Map::ConstructionCounter::kShift), r0);
- __ stw(r3, bit_field3);
-
- // Allocate object with a slack.
- __ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ sub(r3, r10, r3);
- // r3: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(r8, r3);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(r8, r3, r9);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r8, r10, r9);
-
- // r11: slack tracking counter value before decreasing.
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ bne(&allocated);
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(r4, r6, r7, r5);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r4, r6, r7);
-
- // Continue with JSObject being successfully allocated
- // r4: constructor function
- // r6: new target
- // r7: JSObject
- __ b(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(r8, r10, r9);
-
- // Continue with JSObject being successfully allocated
- // r4: constructor function
- // r6: new target
- // r7: JSObject
- __ b(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r4: constructor function
- // r6: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(r4, r6, r4, r6);
- __ CallRuntime(Runtime::kNewObject);
+ // Allocate the new receiver object.
+ __ Push(r4, r6);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mr(r7, r3);
__ Pop(r4, r6);
- // Receiver for constructor call allocated.
- // r4: constructor function
- // r6: new target
- // r7: JSObject
- __ bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- r4: constructor function
+ // -- r6: new target
+ // -- r7: newly allocated object
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ LoadP(r3, MemOperand(sp));
@@ -680,6 +661,19 @@
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi, indicating that the constructor result
+ // from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(r3, &dont_throw);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
@@ -691,17 +685,23 @@
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
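// The four construct-stub entry points now map onto the helper's flags as
// follows (summarizing the calls above):
//
//   Generic:             (api=false, create_receiver=true,  derived_check=false)
//   Api:                 (api=true,  create_receiver=false, derived_check=false)
//   Builtins:            (api=false, create_receiver=false, derived_check=false)
//   BuiltinsForDerived:  (api=false, create_receiver=false, derived_check=true)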
@@ -846,10 +846,8 @@
// o sp: stack pointer
// o lr: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ppc.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -857,17 +855,23 @@
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r4);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ push(r6);
-
- // Push zero for bytecode array offset.
- __ li(r3, Operand::Zero());
- __ push(r3);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ Label array_done;
+ Register debug_info = r5;
+ DCHECK(!debug_info.is(r3));
+ __ LoadP(debug_info,
+ FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+ // Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+ __ beq(&array_done);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ bind(&array_done);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
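// Bytecode selection above, as straight-line pseudocode (simplified; names as
// in the assembly):
//
//   bytecode_array = shared->function_data;             // original bytecode
//   if (shared->debug_info != DebugInfo::uninitialized())
//     bytecode_array = debug_info->abstract_code;       // debug copy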
@@ -878,6 +882,10 @@
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ li(r3, Operand::Zero());
+ __ Push(r6, kInterpreterBytecodeArrayRegister, r3);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size (word) from the BytecodeArray object.
@@ -908,23 +916,9 @@
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(r0, Heap::kStackLimitRootIndex);
- __ cmp(sp, r0);
- __ bge(&ok);
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -932,10 +926,9 @@
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -946,7 +939,9 @@
// and header removal.
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
- __ bkpt(0); // Does not return here.
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
}
@@ -983,7 +978,8 @@
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -999,7 +995,9 @@
Generate_InterpreterPushArgs(masm, r5, r6, r7);
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1028,45 +1026,24 @@
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Save accumulator register and pass the deoptimization type to
- // the runtime system.
- __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
- __ Push(kInterpreterAccumulatorRegister, r4);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ addi(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ LoadP(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ LoadP(r4,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadP(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1094,6 +1071,29 @@
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
+ __ Push(r4);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1108,22 +1108,32 @@
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ mov(r0,
+ Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+ __ mtlr(r0);
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1346,13 +1356,12 @@
// Load the next prototype.
__ bind(&next_prototype);
- __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
- __ DecodeField<Map::IsHiddenPrototype>(scratch, SetRC);
+ __ DecodeField<Map::HasHiddenPrototype>(scratch, SetRC);
__ beq(receiver_check_failed, cr0);
+
+ __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ b(&prototype_loop_start);
@@ -1868,9 +1877,7 @@
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ LoadP(r5, FieldMemOperand(
- r3, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ LoadP(r5, FieldMemOperand(r3, JSArgumentsObject::kLengthOffset));
__ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
__ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
__ cmp(r5, ip);
@@ -1946,10 +1953,138 @@
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ mov(scratch1, Operand(debug_is_active));
+ __ lbz(scratch1, MemOperand(scratch1));
+ __ cmpi(scratch1, Operand::Zero());
+ __ bne(&done);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+ __ bne(&no_interpreter_frame);
+ __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mr(fp, scratch2);
+ __ LoadP(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ scratch1, FieldMemOperand(
+ scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+ __ SmiUntag(scratch1);
+#endif
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of the destination area where we will put the arguments
+ // after we drop the current frame. We add kPointerSize to count the receiver
+ // argument, which is not included in the formal parameter count.
+ Register dst_reg = scratch2;
+ __ ShiftLeftImm(dst_reg, scratch1, Operand(kPointerSizeLog2));
+ __ add(dst_reg, fp, dst_reg);
+ __ addi(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ ShiftLeftImm(src_reg, args_reg, Operand(kPointerSizeLog2));
+ __ add(src_reg, sp, src_reg);
+ // Count receiver argument as well (not included in args_reg).
+ __ addi(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ cmpl(src_reg, dst_reg);
+ __ Check(lt, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ RestoreFrameStateForTailCall();
+
+ // Now copy callee arguments to the caller frame, going backwards to avoid
+ // corrupting them (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop;
+ __ addi(tmp_reg, args_reg, Operand(1)); // +1 for receiver
+ __ mtctr(tmp_reg);
+ __ bind(&loop);
+ __ LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
+ __ StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+ __ bdnz(&loop);
+
+ // Leave current frame.
+ __ mr(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
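// The copy loop above slides the already-pushed arguments up over the frame
// being dropped. Because the source and destination areas may overlap, both
// pointers walk backwards from one-past-the-end, as in a memmove. A
// self-contained sketch (illustration only):
//
//   #include <cstdint>
//   void CopyArgsBackwards(intptr_t* dst_end, const intptr_t* src_end,
//                          int count) {
//     while (count-- > 0) *--dst_end = *--src_end;  // safe when dst > src
//   }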
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
@@ -2034,6 +2169,10 @@
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r3, r6, r7, r8);
+ }
+
__ LoadWordArith(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
#if !V8_TARGET_ARCH_PPC64
@@ -2094,7 +2233,7 @@
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
}
__ bind(&done);
}
@@ -2138,13 +2277,18 @@
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r4);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r3, r6, r7, r8);
+ }
+
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
@@ -2165,7 +2309,8 @@
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
@@ -2175,14 +2320,25 @@
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq);
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Call]] internal method.
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&non_callable, cr0);
+
__ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r3, r6, r7, r8);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(r4);
// Increase the arguments size to include the pushed function and the
@@ -2195,17 +2351,13 @@
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
- __ beq(&non_callable, cr0);
// Overwrite the original receiver the (original) target.
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
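// Dispatch order in Generate_Call after this change (sketch of the code
// above):
//   1. JSFunction        -> CallFunction(mode, tail_call_mode)
//   2. JSBoundFunction   -> CallBoundFunction(tail_call_mode)
//   3. no [[Call]] bit   -> &non_callable (throw)
//   4. JSProxy           -> runtime proxy [[Call]] fallback
//   5. other callables   -> CALL_AS_FUNCTION_DELEGATE
// i.e. the callable-bit test now runs before the proxy check, so non-callable
// objects take the throw path without reaching the delegate.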
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index 26fbe98..03c73af 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -91,9 +91,8 @@
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond, Strength strength);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
Register rhs, Label* lhs_not_nan,
Label* slow, bool strict);
@@ -248,7 +247,7 @@
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond, Strength strength) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r3, r4);
@@ -268,14 +267,6 @@
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
__ beq(slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ cmpi(r7, Operand(HEAP_NUMBER_TYPE));
- __ beq(&return_equal);
- __ andi(r0, r7, Operand(kIsNotStringMask));
- __ bne(slow, cr0);
- }
} else {
__ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
__ beq(&heap_number);
@@ -289,13 +280,6 @@
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
__ beq(slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ andi(r0, r7, Operand(kIsNotStringMask));
- __ bne(slow, cr0);
- }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -515,40 +499,49 @@
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
// r5 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ andi(r0, r5, Operand(kIsNotStringMask));
__ bne(&object_test, cr0);
__ andi(r0, r5, Operand(kIsNotInternalizedMask));
__ bne(possible_strings, cr0);
__ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
- __ bge(not_both_strings);
+ __ bge(runtime_call);
__ andi(r0, r6, Operand(kIsNotInternalizedMask));
__ bne(possible_strings, cr0);
- // Both are internalized. We already checked they weren't the same pointer
- // so they are not equal.
- __ li(r3, Operand(NOT_EQUAL));
+ // Both are internalized. We already checked they weren't the same pointer, so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in r3.
__ Ret();
__ bind(&object_test);
- __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
- __ blt(not_both_strings);
- __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
- __ blt(not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
+ __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
__ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
- __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ and_(r3, r5, r6);
- __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
- __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
+ __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
+ __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ andi(r0, r7, Operand(1 << Map::kIsUndetectable));
+ __ bne(&undetectable, cr0);
+ __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
+ __ bne(&return_unequal, cr0);
+
+ __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(runtime_call);
+ __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE);
+ __ blt(runtime_call);
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in r3.
+ __ Ret();
+
+ __ bind(&undetectable);
+ __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
+ __ beq(&return_unequal, cr0);
+ __ li(r3, Operand(EQUAL));
__ Ret();
}
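// Decision tree implemented above for lhs == rhs when both are non-string
// heap objects (sketch):
//
//   if (lhs is undetectable)
//     return (rhs is undetectable) ? EQUAL : not_equal;
//   if (rhs is undetectable) return not_equal;
//   if (lhs and rhs are both JSReceivers) return not_equal;  // distinct objects
//   goto runtime_call;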
@@ -600,7 +593,7 @@
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -705,11 +698,19 @@
__ bind(&slow);
- __ Push(lhs, rhs);
- // Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(lhs, rhs);
+ __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ }
+ // Turn true into 0 and false into some non-zero value.
+ STATIC_ASSERT(EQUAL == 0);
+ __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+ __ sub(r3, r3, r4);
+ __ Ret();
} else {
+ __ Push(lhs, rhs);
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
@@ -722,8 +723,7 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
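// For cc == eq the stub now calls Runtime::kStrictEqual/kEqual (which return
// the true/false heap values) and converts that into the callers' convention
// that 0 means equal: subtracting the true value from the result yields 0
// exactly when the runtime answered true (note STATIC_ASSERT(EQUAL == 0)
// above). Sketch:
//
//   r3 = runtime_result - true_value;  // 0 iff runtime_result == true_value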
@@ -942,7 +942,6 @@
__ ConvertIntToDouble(exponent, double_exponent);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -956,7 +955,6 @@
__ stfd(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(r3));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
__ mflr(r0);
@@ -973,7 +971,6 @@
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -1055,14 +1052,13 @@
// Need at least one extra slot for return address location.
int arg_stack_space = 1;
-// PPC LINUX ABI:
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// Pass buffer for return value on stack if necessary
- if (result_size() > 1) {
- DCHECK_EQ(2, result_size());
- arg_stack_space += 2;
+ bool needs_return_buffer =
+ result_size() > 2 ||
+ (result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
+ if (needs_return_buffer) {
+ arg_stack_space += result_size();
}
-#endif
__ EnterExitFrame(save_doubles(), arg_stack_space);
@@ -1076,9 +1072,8 @@
// Result returned in registers or stack, depending on result size and ABI.
Register isolate_reg = r5;
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
- if (result_size() > 1) {
- // The return value is 16-byte non-scalar value.
+ if (needs_return_buffer) {
+ // The return value is a non-scalar value.
// Use frame storage reserved by calling function to pass return
// buffer as implicit first argument.
__ mr(r5, r4);
@@ -1086,21 +1081,20 @@
__ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
isolate_reg = r6;
}
-#endif
// Call C built-in.
__ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
Register target = r15;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // Native AIX/PPC64 Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
- __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
- target = ip;
-#elif ABI_CALL_VIA_IP
- __ Move(ip, r15);
- target = ip;
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
+ __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
+ target = ip;
+ } else if (ABI_CALL_VIA_IP) {
+ __ Move(ip, r15);
+ target = ip;
+ }
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -1112,13 +1106,12 @@
__ Call(target);
__ bind(&after_call);
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
- if (result_size() > 1) {
+ if (needs_return_buffer) {
+ if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize));
__ LoadP(r4, MemOperand(r3, kPointerSize));
__ LoadP(r3, MemOperand(r3));
}
-#endif
// Check result for exception sentinel.
Label exception_returned;
@@ -1132,9 +1125,9 @@
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
- __ mov(r5, Operand(pending_exception_address));
- __ LoadP(r5, MemOperand(r5));
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ mov(r6, Operand(pending_exception_address));
+ __ LoadP(r6, MemOperand(r6));
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay);
__ stop("Unexpected pending exception");
@@ -1538,332 +1531,6 @@
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(r4, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r3. Use unsigned comparison to get negative
- // check for free.
- __ cmpl(r4, r3);
- __ bge(&slow);
-
- // Read the argument from the stack and return it.
- __ sub(r6, r3, r4);
- __ SmiToPtrArrayOffset(r6, r6);
- __ add(r6, fp, r6);
- __ LoadP(r3, MemOperand(r6, kDisplacement));
- __ blr();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpl(r4, r3);
- __ bge(&slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r6, r3, r4);
- __ SmiToPtrArrayOffset(r6, r6);
- __ add(r6, r5, r6);
- __ LoadP(r3, MemOperand(r6, kDisplacement));
- __ blr();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r4);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
-
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&runtime);
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r5);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
- // Registers used over whole function:
- // r8 : arguments count (tagged)
- // r9 : mapped parameter count (tagged)
-
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mr(r8, r5);
- __ mr(r9, r5);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r8);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r8 = argument count (tagged)
- // r9 = parameter count (tagged)
- // Compute the mapped parameter count = min(r5, r8) in r9.
- __ cmp(r5, r8);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(lt, r9, r5, r8);
- } else {
- Label skip;
- __ mr(r9, r5);
- __ blt(&skip);
- __ mr(r9, r8);
- __ bind(&skip);
- }
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ SmiToPtrArrayOffset(r11, r9);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ isel(eq, r11, r0, r11);
- } else {
- Label skip2, skip3;
- __ bne(&skip2);
- __ li(r11, Operand::Zero());
- __ b(&skip3);
- __ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r9);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ bind(&skip3);
- }
-
- // 2. Backing store.
- __ SmiToPtrArrayOffset(r7, r8);
- __ add(r11, r11, r7);
- __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
-
- // r3 = address of new object(s) (tagged)
- // r5 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ LoadP(r7, NativeContextMemOperand());
- __ cmpi(r9, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadP(r11, MemOperand(r7, kNormalOffset));
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ isel(eq, r7, r11, r7);
- } else {
- Label skip4, skip5;
- __ bne(&skip4);
- __ LoadP(r7, MemOperand(r7, kNormalOffset));
- __ b(&skip5);
- __ bind(&skip4);
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ bind(&skip5);
- }
-
- // r3 = address of new object (tagged)
- // r5 = argument count (smi-tagged)
- // r7 = address of arguments map (tagged)
- // r9 = mapped parameter count (tagged)
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(r4);
- const int kCalleeOffset =
- JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
- __ StoreP(r4, FieldMemOperand(r3, kCalleeOffset), r0);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r8);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset =
- JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
- __ StoreP(r8, FieldMemOperand(r3, kLengthOffset), r0);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r7 will point there, otherwise
- // it will point to the backing store.
- __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // r3 = address of new object (tagged)
- // r5 = argument count (tagged)
- // r7 = address of parameter map or backing store (tagged)
- // r9 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(eq, r4, r7, r4);
- __ beq(&skip_parameter_map);
- } else {
- Label skip6;
- __ bne(&skip6);
- // Move backing store address to r4, because it is
- // expected there when filling in the unmapped arguments.
- __ mr(r4, r7);
- __ b(&skip_parameter_map);
- __ bind(&skip6);
- }
-
- __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
- __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
- r0);
- __ SmiToPtrArrayOffset(r8, r9);
- __ add(r8, r8, r7);
- __ addi(r8, r8, Operand(kParameterMapHeaderSize));
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
- r0);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop;
- __ mr(r8, r9);
- __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
- __ sub(r11, r11, r9);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ SmiToPtrArrayOffset(r4, r8);
- __ add(r4, r4, r7);
- __ addi(r4, r4, Operand(kParameterMapHeaderSize));
-
- // r4 = address of backing store (tagged)
- // r7 = address of parameter map (tagged)
- // r8 = temporary scratch (a.o., for address calculation)
- // r10 = temporary scratch (a.o., for address calculation)
- // ip = the hole value
- __ SmiUntag(r8);
- __ mtctr(r8);
- __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
- __ add(r10, r4, r8);
- __ add(r8, r7, r8);
- __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
- __ bind(¶meters_loop);
- __ StorePU(r11, MemOperand(r8, -kPointerSize));
- __ StorePU(ip, MemOperand(r10, -kPointerSize));
- __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
- __ bdnz(¶meters_loop);
-
- // Restore r8 = argument count (tagged).
- __ LoadP(r8, FieldMemOperand(r3, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // r3 = address of new object (tagged)
- // r4 = address of backing store (tagged)
- // r8 = argument count (tagged)
- // r9 = mapped parameter count (tagged)
- // r11 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
- __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
- __ sub(r11, r8, r9, LeaveOE, SetRC);
- __ Ret(eq, cr0);
-
- Label arguments_loop;
- __ SmiUntag(r11);
- __ mtctr(r11);
-
- __ SmiToPtrArrayOffset(r0, r9);
- __ sub(r6, r6, r0);
- __ add(r11, r4, r0);
- __ addi(r11, r11,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
- __ bind(&arguments_loop);
- __ LoadPU(r7, MemOperand(r6, -kPointerSize));
- __ StorePU(r7, MemOperand(r11, kPointerSize));
- __ bdnz(&arguments_loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r8 = argument count (tagged)
- __ bind(&runtime);
- __ Push(r4, r6, r8);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label slow;
@@ -1887,117 +1554,6 @@
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
-
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r5);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(r11, r5, SetRC);
- __ beq(&add_arguments_object, cr0);
- __ addi(r11, r11, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ addi(r11, r11, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(r11, r3, r7, r8, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
-
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r8, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r8, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(r5);
- __ StoreP(r5,
- FieldMemOperand(r3, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- r0);
-
- // If there are no actual arguments, we're done.
- __ SmiUntag(r9, r5, SetRC);
- __ Ret(eq, cr0);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ StoreP(r5, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up r7 to point just prior to the first array slot.
- __ addi(r7, r7,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ mtctr(r9);
- __ bind(&loop);
- // Pre-decrement r6 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ LoadPU(r8, MemOperand(r6, -kPointerSize));
- // Pre-increment r7 with kPointerSize on each iteration.
- __ StorePU(r8, MemOperand(r7, kPointerSize));
- __ bdnz(&loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
- // r7 : rest parameter index (tagged)
-
- Label runtime;
- __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r0, r5);
- __ add(r6, r8, r0);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ Push(r5, r6, r7);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2085,48 +1641,49 @@
__ LoadP(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
__ mr(r6, subject); // Make a copy of the original subject string.
- __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
// subject: subject string
// r6: subject string
- // r3: subject string instance type
// regexp_data: RegExp data (FixedArray)
// Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (5).
- // (2) Anything but sequential or cons? If yes, go to (6).
- // (3) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (4) Is subject external? If yes, go to (7).
- // (5) Sequential string. Load regexp code according to encoding.
+ // (1) Sequential string? If yes, go to (4).
+ // (2) Sequential or cons? If not, go to (5).
+ // (3) Cons string. If the string is flat, replace subject with first string
+ // and go to (1). Otherwise bail out to runtime.
+ // (4) Sequential string. Load regexp code according to encoding.
// (E) Carry on.
/// [...]
// Deferred code at the end of the stub:
- // (6) Not a long external string? If yes, go to (8).
- // (7) External string. Make it, offset-wise, look like a sequential string.
- // Go to (5).
- // (8) Short external string or not a string? If yes, bail out to runtime.
- // (9) Sliced string. Replace subject with parent. Go to (4).
+ // (5) Long external string? If not, go to (7).
+ // (6) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (4).
+ // (7) Short external string or not a string? If yes, bail out to runtime.
+ // (8) Sliced string. Replace subject with parent. Go to (1).
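The renumbered steps form a loop: cons and sliced strings strip one wrapper and return to (1), so the representation check reruns on the underlying string. A rough C++ sketch of the same control flow (helper names are illustrative, not V8 API):

    for (;;) {
      InstanceType type = InstanceTypeOf(subject);  // bind(&check_underlying)
      if (IsSequential(type)) break;                // (1) -> (4)
      if (IsCons(type)) {                           // (3)
        if (!SecondIsEmpty(subject)) return Runtime();  // not flat: bail out
        subject = ConsFirst(subject);
        continue;                                   // back to (1)
      }
      if (IsExternal(type) && !IsShort(type)) {     // (5)/(6)
        subject = AsSequentialView(subject);        // adjust offset only
        break;                                      // -> (4)
      }
      if (IsSliced(type)) {                         // (8)
        subject = SlicedParent(subject);
        continue;                                   // back to (1)
      }
      return Runtime();                             // (7)
    }
    // (4): pick the one- or two-byte regexp code object and execute it.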
- Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */,
- not_seq_nor_cons /* 6 */, not_long_external /* 8 */;
+ Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
+ not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
- // (1) Sequential string? If yes, go to (5).
+ __ bind(&check_underlying);
+ __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+
+ // (1) Sequential string? If yes, go to (4).
+
STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
kShortExternalStringMask) == 0x93);
__ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ beq(&seq_string, cr0); // Go to (5).
+ __ beq(&seq_string, cr0); // Go to (4).
- // (2) Anything but sequential or cons? If yes, go to (6).
+ // (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
STATIC_ASSERT(kExternalStringTag < 0xffffu);
__ cmpi(r4, Operand(kExternalStringTag));
- __ bge(¬_seq_nor_cons); // Go to (6).
+ __ bge(¬_seq_nor_cons); // Go to (5).
// (3) Cons string. Check that it's flat.
// Replace subject with first string and reload instance type.
@@ -2134,20 +1691,9 @@
__ CompareRoot(r3, Heap::kempty_stringRootIndex);
__ bne(&runtime);
__ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ b(&check_underlying);
- // (4) Is subject external? If yes, go to (7).
- __ bind(&check_underlying);
- __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- STATIC_ASSERT(kStringRepresentationMask == 3);
- __ andi(r0, r3, Operand(kStringRepresentationMask));
- // The underlying external string is never a short external string.
- STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ bne(&external_string, cr0); // Go to (7).
-
- // (5) Sequential string. Load regexp code according to encoding.
+ // (4) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
// subject: sequential subject string (or look-alike, external string)
// r6: original subject string
@@ -2255,16 +1801,6 @@
// Locate the code entry and call it.
__ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
- // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
- // RegExp routine. Extract the instruction address here since
- // DirectCEntryStub::GenerateCall will not do it for calls out to
- // what it thinks is C code compiled for the simulator/host
- // platform.
- __ LoadP(code, MemOperand(code, 0)); // Instruction address
-#endif
-
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, code);
@@ -2390,12 +1926,12 @@
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
- // (6) Not a long external string? If yes, go to (8).
+ // (5) Long external string? If not, go to (7).
__ bind(¬_seq_nor_cons);
// Compare flags are still set.
- __ bgt(¬_long_external); // Go to (8).
+ __ bgt(¬_long_external); // Go to (7).
- // (7) External string. Make it, offset-wise, look like a sequential string.
+ // (6) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
@@ -2412,15 +1948,15 @@
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subi(subject, subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ b(&seq_string); // Go to (5).
+ __ b(&seq_string); // Go to (4).
- // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (7) Short external string or not a string? If yes, bail out to runtime.
__ bind(¬_long_external);
STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
__ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
__ bne(&runtime, cr0);
- // (9) Sliced string. Replace subject with parent. Go to (4).
+ // (8) Sliced string. Replace subject with parent. Go to (1).
// Load offset into r11 and replace subject string with parent.
__ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(r11);
@@ -2658,7 +2194,8 @@
__ bind(&call_function);
__ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -2696,7 +2233,7 @@
__ bind(&call);
__ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -3246,6 +2783,37 @@
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in r3.
+ Label is_number;
+ __ JumpIfSmi(r3, &is_number);
+
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(le);
+
+ Label not_heap_number;
+ __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+ __ bne(¬_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(¬_heap_number);
+
+ Label not_oddball;
+ __ cmpi(r4, Operand(ODDBALL_TYPE));
+ __ bne(¬_oddball);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(¬_oddball);
+
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
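For reference, the fast paths above mirror the ToName conversion used for property keys; a sketch of the dispatch, in the order the stub checks it:

    // ToName(42)    -> "42"     smis (and heap numbers) via NumberToStringStub
    // ToName("x")   -> "x"      strings and symbols (<= LAST_NAME_TYPE) as-is
    // ToName(true)  -> "true"   oddballs return their cached to_string field
    // ToName({})    -> %ToName  anything else falls back to the runtime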
+
+
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3438,18 +3006,14 @@
__ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
- __ AssertSmi(r4);
- __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
- __ AssertSmi(r3);
- }
- __ sub(r3, r4, r3);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
+ __ AssertSmi(r4);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ AssertSmi(r3);
}
+ __ sub(r3, r4, r3);
+ __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -3547,7 +3111,7 @@
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3770,8 +3334,6 @@
if (Token::IsEqualityOp(op())) {
__ sub(r3, r3, r4);
__ Ret();
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
@@ -3820,15 +3382,15 @@
void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // Native AIX/PPC64 Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
- __ LoadP(ip, MemOperand(target, 0)); // Instruction address
-#else
- // ip needs to be set for DirectCEentryStub::Generate, and also
- // for ABI_CALL_VIA_IP.
- __ Move(ip, target);
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ __ LoadP(ip, MemOperand(target, 0)); // Instruction address
+ } else {
+ // ip needs to be set for DirectCEntryStub::Generate, and also
+ // for ABI_CALL_VIA_IP.
+ __ Move(ip, target);
+ }
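On the ABIs where ABI_USES_FUNCTION_DESCRIPTORS holds (AIX, big-endian PPC64 ELFv1), `target` points at a three-word descriptor rather than at code; a sketch with descriptive field names, not V8 types:

    struct FunctionDescriptor {
      void* entry;  // instruction address: loaded into ip from offset 0
      void* toc;    // callee's TOC base: loaded into ABI_TOC_REGISTER
      void* env;    // environment pointer: unused on this call path
    };

Since the flag is now consumed as a C++ constant rather than a preprocessor switch, both branches must compile on every target, which is presumably why the #if blocks become plain if statements here and below.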
intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
__ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
@@ -4142,9 +3704,8 @@
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(), &dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(), regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4791,34 +4352,32 @@
#if !defined(USE_SIMULATOR)
uintptr_t entry_hook =
reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ ExternalReference entry_hook = ExternalReference(
+ &dispatcher, ExternalReference::BUILTIN_CALL, isolate());
+
+ // It additionally takes an isolate as a third parameter.
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+#endif
+
__ mov(ip, Operand(entry_hook));
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // Function descriptor
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_CALL_VIA_IP
-// ip set above, so nothing to do.
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+ __ LoadP(ip, MemOperand(ip, 0));
+ }
+ // ip set above, so nothing more to do for ABI_CALL_VIA_IP.
// PPC LINUX ABI:
__ li(r0, Operand::Zero());
__ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ mov(ip, Operand(ExternalReference(
- &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
-#endif
__ Call(ip);
-#if !defined(USE_SIMULATOR)
__ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
-#endif
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
@@ -5143,6 +4702,633 @@
GenerateCase(masm, FAST_ELEMENTS);
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : target
+ // -- r6 : new target
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+ __ AssertReceiver(r6);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE);
+ __ bne(&new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r5, &new_object);
+ __ CompareObjectType(r5, r3, r3, MAP_TYPE);
+ __ bne(&new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r3, r4);
+ __ bne(&new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+ __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ StoreP(r5, MemOperand(r3, JSObject::kMapOffset));
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, MemOperand(r3, JSObject::kPropertiesOffset));
+ __ StoreP(r6, MemOperand(r3, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ addi(r4, r3, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- r3 : result (untagged)
+ // -- r4 : result fields (untagged)
+ // -- r8 : result end (untagged)
+ // -- r5 : initial map
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset));
+ __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC);
+ __ bne(&slack_tracking, cr0);
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(r4, r8, r9);
+
+ // Add the object tag to make the JSObject real.
+ __ addi(r3, r3, Operand(kHeapObjectTag));
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0);
+ __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+ __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
+ __ sub(r7, r8, r7);
+ __ InitializeFieldsWithFiller(r4, r7, r9);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r4, r8, r9);
+
+ // Add the object tag to make the JSObject real.
+ __ addi(r3, r3, Operand(kHeapObjectTag));
+
+ // Check if we can finalize the instance size.
+ __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
+ __ Ret(ne);
+
+ // Finalize the instance size.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r5);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r3);
+ }
+ __ Ret();
+ }
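The Add above decrements the construction counter stored in the top bits of bit_field3; because the counter was just checked to be non-zero, the subtraction cannot borrow into neighbouring fields. Equivalent C++ (a sketch):

    uint32_t DecrementConstructionCounter(uint32_t bit_field3) {
      // Counter known non-zero (the DecodeField/SetRC test above),
      // so no borrow reaches the lower bit fields.
      return bit_field3 - (1u << Map::ConstructionCounter::kShift);
    }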
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ ShiftLeftImm(r7, r7,
+ Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+ __ Push(r5, r7);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(r5);
+ }
+ __ subi(r3, r3, Operand(kHeapObjectTag));
+ __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+ __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ add(r8, r3, r8);
+ __ b(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(r4, r6);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r5 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mr(r5, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ cmp(ip, r4);
+ __ bne(&loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_rest_parameters);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(r4);
+#endif
+ __ sub(r3, r3, r4, LeaveOE, SetRC);
+ __ bgt(&rest_parameters, cr0);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the rest parameter array in r3.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0);
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0);
+ __ li(r4, Operand::Zero());
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ b(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ SmiToPtrArrayOffset(r9, r3);
+ __ add(r5, r5, r9);
+ __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r3 : number of rest parameters (tagged)
+ // -- r5 : pointer just past the first rest parameter
+ // -- r9 : size of rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ add(r4, r4, r9);
+ __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in r6.
+ __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ addi(r7, r6,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop;
+ __ SmiUntag(r0, r3);
+ __ mtctr(r0);
+ __ bind(&loop);
+ __ LoadPU(ip, MemOperand(r5, -kPointerSize));
+ __ StorePU(ip, MemOperand(r7, kPointerSize));
+ __ bdnz(&loop);
+ __ addi(r7, r7, Operand(kPointerSize));
+ }
+
+ // Set up the rest parameter array in r7.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
+ __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset));
+ __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset));
+ __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ addi(r3, r7, Operand(kHeapObjectTag));
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r4);
+ __ Push(r3, r5, r4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mr(r6, r3);
+ __ Pop(r3, r5);
+ }
+ __ b(&done_allocate);
+ }
+}
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+
+ // TODO(bmeurer): Clean up to match the FastNewStrictArgumentsStub.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(r5);
+#endif
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ add(r6, fp, r6);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
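The formal parameter count is smi-tagged at this point, so SmiToPtrArrayOffset has to strip the tag before scaling to a byte offset; roughly, in C++ (shift amounts depend on the target's smi layout):

    intptr_t SmiToPtrArrayOffset(intptr_t smi) {
      intptr_t count = smi >> (kSmiTagSize + kSmiShiftSize);  // untag
      return count << kPointerSizeLog2;  // element count -> byte offset
    }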
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+ // Registers used over whole function:
+ // r8 : arguments count (tagged)
+ // r9 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mr(r8, r5);
+ __ mr(r9, r5);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r8);
+ __ add(r6, r6, r7);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r8 = argument count (tagged)
+ // r9 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r5, r8) in r9.
+ __ cmp(r5, r8);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(lt, r9, r5, r8);
+ } else {
+ Label skip;
+ __ mr(r9, r5);
+ __ blt(&skip);
+ __ mr(r9, r8);
+ __ bind(&skip);
+ }
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ SmiToPtrArrayOffset(r11, r9);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ isel(eq, r11, r0, r11);
+ } else {
+ Label skip2, skip3;
+ __ bne(&skip2);
+ __ li(r11, Operand::Zero());
+ __ b(&skip3);
+ __ bind(&skip2);
+ __ SmiToPtrArrayOffset(r11, r9);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ bind(&skip3);
+ }
+
+ // 2. Backing store.
+ __ SmiToPtrArrayOffset(r7, r8);
+ __ add(r11, r11, r7);
+ __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
+
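A worked instance of the size accumulated in r11 (counts illustrative): with mapped parameter count m = 2 and argument count n = 3,

    // parameter map : FixedArray::kHeaderSize + (2 + m) * kPointerSize
    // backing store : FixedArray::kHeaderSize + n * kPointerSize
    // arguments obj : JSSloppyArgumentsObject::kSize
    // One contiguous allocation covers all three; when m == 0 the
    // parameter-map term is zeroed out by the isel/branch above.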
+ // Do the allocation of all three objects in one go.
+ __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
+
+ // r3 = address of new object(s) (tagged)
+ // r5 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ LoadP(r7, NativeContextMemOperand());
+ __ cmpi(r9, Operand::Zero());
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadP(r11, MemOperand(r7, kNormalOffset));
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ isel(eq, r7, r11, r7);
+ } else {
+ Label skip4, skip5;
+ __ bne(&skip4);
+ __ LoadP(r7, MemOperand(r7, kNormalOffset));
+ __ b(&skip5);
+ __ bind(&skip4);
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ bind(&skip5);
+ }
+
+ // r3 = address of new object (tagged)
+ // r5 = argument count (smi-tagged)
+ // r7 = address of arguments map (tagged)
+ // r9 = mapped parameter count (tagged)
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+ __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(r4);
+ __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset),
+ r0);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(r8);
+ __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset),
+ r0);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r7 will point there, otherwise
+ // it will point to the backing store.
+ __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize));
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // r3 = address of new object (tagged)
+ // r5 = argument count (tagged)
+ // r7 = address of parameter map or backing store (tagged)
+ // r9 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(eq, r4, r7, r4);
+ __ beq(&skip_parameter_map);
+ } else {
+ Label skip6;
+ __ bne(&skip6);
+ // Move backing store address to r4, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mr(r4, r7);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+ }
+
+ __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
+ r0);
+ __ SmiToPtrArrayOffset(r8, r9);
+ __ add(r8, r8, r7);
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize));
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+ r0);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
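A concrete instance (assuming Context::MIN_CONTEXT_SLOTS is 4): with parameter_count = 3 and mapped_parameter_count = 2,

    // parameter 0 -> context slot 4 + 3 - 1 = 6
    // parameter 1 -> context slot 4 + 3 - 2 = 5
    // parameter 2 is unmapped and lives only in the backing store.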
+ Label parameters_loop;
+ __ mr(r8, r9);
+ __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ sub(r11, r11, r9);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r4, r8);
+ __ add(r4, r4, r7);
+ __ addi(r4, r4, Operand(kParameterMapHeaderSize));
+
+ // r4 = address of backing store (tagged)
+ // r7 = address of parameter map (tagged)
+ // r8 = temporary scratch (a.o., for address calculation)
+ // r10 = temporary scratch (a.o., for address calculation)
+ // ip = the hole value
+ __ SmiUntag(r8);
+ __ mtctr(r8);
+ __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ add(r10, r4, r8);
+ __ add(r8, r7, r8);
+ __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+
+ __ bind(¶meters_loop);
+ __ StorePU(r11, MemOperand(r8, -kPointerSize));
+ __ StorePU(ip, MemOperand(r10, -kPointerSize));
+ __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+ __ bdnz(¶meters_loop);
+
+ // Restore r8 = argument count (tagged).
+ __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // r3 = address of new object (tagged)
+ // r4 = address of backing store (tagged)
+ // r8 = argument count (tagged)
+ // r9 = mapped parameter count (tagged)
+ // r11 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
+ __ sub(r11, r8, r9, LeaveOE, SetRC);
+ __ Ret(eq, cr0);
+
+ Label arguments_loop;
+ __ SmiUntag(r11);
+ __ mtctr(r11);
+
+ __ SmiToPtrArrayOffset(r0, r9);
+ __ sub(r6, r6, r0);
+ __ add(r11, r4, r0);
+ __ addi(r11, r11,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+
+ __ bind(&arguments_loop);
+ __ LoadPU(r7, MemOperand(r6, -kPointerSize));
+ __ StorePU(r7, MemOperand(r11, kPointerSize));
+ __ bdnz(&arguments_loop);
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r8 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(r4, r6, r8);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r5 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mr(r5, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ cmp(ip, r4);
+ __ bne(&loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r6, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&arguments_adaptor);
+ {
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r3,
+ FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(r3);
+#endif
+ __ SmiToPtrArrayOffset(r9, r3);
+ __ add(r5, r5, r9);
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r9, r3);
+ __ add(r5, r6, r9);
+ }
+ __ bind(&arguments_done);
+ __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r3 : number of arguments (tagged)
+ // -- r5 : pointer just past the first argument
+ // -- r9 : byte size of the arguments
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ add(r4, r4, r9);
+ __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in r6.
+ __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ addi(r7, r6,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop, done_loop;
+ __ SmiUntag(r0, r3, SetRC);
+ __ beq(&done_loop, cr0);
+ __ mtctr(r0);
+ __ bind(&loop);
+ __ LoadPU(ip, MemOperand(r5, -kPointerSize));
+ __ StorePU(ip, MemOperand(r7, kPointerSize));
+ __ bdnz(&loop);
+ __ bind(&done_loop);
+ __ addi(r7, r7, Operand(kPointerSize));
+ }
+
+ // Set up the strict arguments object in r7.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
+ __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset));
+ __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset));
+ __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ addi(r3, r7, Operand(kHeapObjectTag));
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r4);
+ __ Push(r3, r5, r4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mr(r6, r3);
+ __ Pop(r3, r5);
+ }
+ __ b(&done_allocate);
+}
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
@@ -5446,11 +5632,10 @@
__ b(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- r3 : callee
// -- r7 : call_data
@@ -5482,12 +5667,14 @@
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || r3.is(argc.reg()));
+ DCHECK(argc.is_immediate() || r6.is(argc.reg()));
// context save
__ push(context);
- // load context from callee
- __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // load context from callee
+ __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
// callee
__ push(callee);
@@ -5586,7 +5773,7 @@
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5594,24 +5781,32 @@
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- r5 : api_function_address
+ // -- r5 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
+ int arg0Slot = 0;
+ int accessorInfoSlot = 0;
+ int apiStackSpace = 0;
DCHECK(api_function_address.is(r5));
- __ mr(r3, sp); // r0 = Handle<Name>
- __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+ __ mr(r3, sp); // r3 = Handle<Name>
+ __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = v8::PCI::args_
// If ABI passes Handles (pointer-sized struct) in a register:
//
@@ -5625,37 +5820,38 @@
// [0] space for DirectCEntryStub's LR save
// [1] copy of Handle (first arg)
// [2] AccessorInfo&
-#if ABI_PASSES_HANDLES_IN_REGS
- const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
- const int kApiStackSpace = 2;
-#else
- const int kArg0Slot = kStackFrameExtraParamSlot + 1;
- const int kAccessorInfoSlot = kArg0Slot + 1;
- const int kApiStackSpace = 3;
-#endif
+ if (ABI_PASSES_HANDLES_IN_REGS) {
+ accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+ apiStackSpace = 2;
+ } else {
+ arg0Slot = kStackFrameExtraParamSlot + 1;
+ accessorInfoSlot = arg0Slot + 1;
+ apiStackSpace = 3;
+ }
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ __ EnterExitFrame(false, apiStackSpace);
-#if !ABI_PASSES_HANDLES_IN_REGS
- // pass 1st arg by reference
- __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
- __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
-#endif
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ // pass 1st arg by reference
+ __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
+ __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
+ }
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // r4 (internal::Object** args_) as the data.
- __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
- // r4 = AccessorInfo&
- __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
+ __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
+ // r4 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ kStackUnwindSpace, NULL, return_value_operand, NULL);
}
diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc
index 2bf8b4e..d6d86b0 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/ppc/codegen-ppc.cc
@@ -58,9 +58,7 @@
CodeDesc desc;
masm.GetCode(&desc);
-#if !ABI_USES_FUNCTION_DESCRIPTORS
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-#endif
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@@ -96,9 +94,7 @@
CodeDesc desc;
masm.GetCode(&desc);
-#if !ABI_USES_FUNCTION_DESCRIPTORS
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-#endif
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
diff --git a/src/ppc/cpu-ppc.cc b/src/ppc/cpu-ppc.cc
index a42fa53..91ea400 100644
--- a/src/ppc/cpu-ppc.cc
+++ b/src/ppc/cpu-ppc.cc
@@ -25,7 +25,7 @@
return;
}
- const int kCacheLineSize = CpuFeatures::cache_line_size();
+ const int kCacheLineSize = CpuFeatures::icache_line_size();
intptr_t mask = kCacheLineSize - 1;
byte *start =
reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
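The line size feeds the usual PPC flush sequence: align the start down to a line boundary, flush the data-cache lines, then invalidate the matching instruction-cache lines. A sketch (the cache ops are raw instructions, shown as comments):

    void FlushLines(char* buffer, size_t size, int line_size) {
      intptr_t mask = line_size - 1;
      char* aligned = reinterpret_cast<char*>(
          reinterpret_cast<intptr_t>(buffer) & ~mask);  // round down
      char* end = buffer + size;
      for (char* p = aligned; p < end; p += line_size) { /* dcbf 0,p */ }
      /* sync */
      for (char* p = aligned; p < end; p += line_size) { /* icbi 0,p */ }
      /* isync */
    }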
diff --git a/src/ppc/deoptimizer-ppc.cc b/src/ppc/deoptimizer-ppc.cc
index 4232342..9ec5cdd 100644
--- a/src/ppc/deoptimizer-ppc.cc
+++ b/src/ppc/deoptimizer-ppc.cc
@@ -88,31 +88,6 @@
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
- // We ensure the values are Smis to avoid confusing the garbage
- // collector in the event that any values are retreived and stored
- // elsewhere.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(
- i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i)));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -131,8 +106,7 @@
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on PPC in the input frame.
return false;
}
diff --git a/src/ppc/disasm-ppc.cc b/src/ppc/disasm-ppc.cc
index d9450f8..e72658f 100644
--- a/src/ppc/disasm-ppc.cc
+++ b/src/ppc/disasm-ppc.cc
@@ -1073,14 +1073,12 @@
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // The first field will be identified as a jump table entry. We emit the rest
- // of the structure as zero, so just skip past them.
- if (instr->InstructionBits() == 0) {
+ if (ABI_USES_FUNCTION_DESCRIPTORS && instr->InstructionBits() == 0) {
+ // The first field will be identified as a jump table entry. We
+ // emit the rest of the structure as zero, so just skip past them.
Format(instr, "constant");
return Instruction::kInstrSize;
}
-#endif
switch (instr->OpcodeValue() << 26) {
case TWI: {
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index b649f71..3db7bd5 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -54,20 +54,6 @@
const Register StringCompareDescriptor::RightRegister() { return r3; }
-const Register ArgumentsAccessReadDescriptor::index() { return r4; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return r4; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return r5; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return r6; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return r7; }
-
-
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -96,6 +82,29 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -113,6 +122,10 @@
// static
+const Register ToNameDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
@@ -165,13 +178,6 @@
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
@@ -406,6 +412,14 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -417,7 +431,6 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -429,7 +442,6 @@
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 9cd35ab..14759de 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -183,6 +183,10 @@
}
}
+void MacroAssembler::Drop(Register count, Register scratch) {
+ ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
+ add(sp, sp, scratch);
+}
void MacroAssembler::Call(Label* target) { b(target, SetLK); }
@@ -298,13 +302,10 @@
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
- // N.B. scratch may be same register as object
DCHECK(cond == eq || cond == ne);
- mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
- and_(scratch, object, r0);
- mov(r0, Operand(ExternalReference::new_space_start(isolate())));
- cmp(scratch, r0);
- b(cond, branch);
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cond, branch);
}
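Rather than comparing against a single new-space start/mask pair, the test now locates the object's page (MemoryChunk) header by alignment and reads the space bits from its flag word; a sketch with illustrative constants:

    bool InNewSpace(uintptr_t object) {
      uintptr_t chunk = object & ~(kPageSize - 1);  // page start (aligned)
      uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + kFlagsOffset);
      return (flags & ((1u << IN_FROM_SPACE) | (1u << IN_TO_SPACE))) != 0;
    }

This drops the old code's assumption that new space is one contiguous address range.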
@@ -483,6 +484,68 @@
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to
+ // update the remembered set. If incremental marking is off, there is
+ // nothing for us to do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(r4));
+ DCHECK(code_entry.is(r7));
+ DCHECK(scratch.is(r8));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ addi(scratch, js_function, Operand(offset - kHeapObjectTag));
+ LoadP(ip, MemOperand(scratch));
+ cmp(ip, code_entry);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ addi(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ mflr(r0);
+ MultiPush(kJSCallerSaved | r0.bit());
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count, code_entry);
+
+ mr(r3, js_function);
+ mr(r4, dst);
+ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers (including js_function and code_entry).
+ MultiPop(kJSCallerSaved | r0.bit());
+ mtlr(r0);
+
+ bind(&done);
+}
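The two CheckPageFlag tests above are the standard write-barrier filter: the slow path runs only when the stored value's page can contain moving/marked objects and the host object's page is being tracked. As a boolean sketch (the page lookup is as in InNewSpace):

    bool NeedsRecordWrite(uintptr_t object_page_flags,
                          uintptr_t value_page_flags) {
      return (value_page_flags &
              MemoryChunk::kPointersToHereAreInterestingMask) != 0 &&
             (object_page_flags &
              MemoryChunk::kPointersFromHereAreInterestingMask) != 0;
    }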
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch,
@@ -564,6 +627,16 @@
mtlr(r0);
}
+void MacroAssembler::RestoreFrameStateForTailCall() {
+ if (FLAG_enable_embedded_constant_pool) {
+ LoadP(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ set_constant_pool_available(false);
+ }
+ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ mtlr(r0);
+}
const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
@@ -640,28 +713,27 @@
fsub(dst, src, kDoubleRegZero);
}
-
-void MacroAssembler::ConvertIntToDouble(Register src,
- DoubleRegister double_dst) {
- MovIntToDouble(double_dst, src, r0);
- fcfid(double_dst, double_dst);
+void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+ MovIntToDouble(dst, src, r0);
+ fcfid(dst, dst);
}
-
void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
- DoubleRegister double_dst) {
- MovUnsignedIntToDouble(double_dst, src, r0);
- fcfid(double_dst, double_dst);
+ DoubleRegister dst) {
+ MovUnsignedIntToDouble(dst, src, r0);
+ fcfid(dst, dst);
}
-
-void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
- const Register src,
- const Register int_scratch) {
- MovIntToDouble(dst, src, int_scratch);
+void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+ MovIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
+void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
+ DoubleRegister dst) {
+ MovUnsignedIntToDouble(dst, src, r0);
+ fcfids(dst, dst);
+}
#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertInt64ToDouble(Register src,
@@ -1116,7 +1188,7 @@
Push(new_target);
}
Push(fun, fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2114,6 +2186,41 @@
TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
}
+void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
+ Register scratch1,
+ Register scratch2) {
+#if V8_TARGET_ARCH_PPC64
+ MovDoubleToInt64(scratch1, input);
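+ // -0.0 has raw bits 0x8000000000000000; rotating left by one maps it
+ // (and only it) to 1.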
+ rotldi(scratch1, scratch1, 1);
+ cmpi(scratch1, Operand(1));
+#else
+ MovDoubleToInt64(scratch1, scratch2, input);
+ Label done;
+ cmpi(scratch2, Operand::Zero());
+ bne(&done);
+ lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
+ cmp(scratch1, scratch2);
+ bind(&done);
+#endif
+}
+
+void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ MovDoubleToInt64(scratch, input);
+#else
+ MovDoubleHighToInt(scratch, input);
+#endif
+ cmpi(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+#else
+ lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
+#endif
+ cmpi(scratch, Operand::Zero());
+}
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
@@ -2335,18 +2442,6 @@
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, r4);
- InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2441,9 +2536,9 @@
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// will not return here
}
@@ -2656,6 +2751,18 @@
}
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
+ push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+ pop(object);
+ Check(ge, kOperandIsNotAReceiver);
+ }
+}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
@@ -3087,20 +3194,21 @@
int num_reg_arguments,
int num_double_arguments) {
DCHECK(has_frame());
-// Just call directly. The function called cannot cause a GC, or
-// allow preemption, so the return address in the link register
-// stays correct.
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
Register dest = function;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // AIX uses a function descriptor. When calling C code be aware
- // of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
- LoadP(ip, MemOperand(function, 0));
- dest = ip;
-#elif ABI_CALL_VIA_IP
- Move(ip, function);
- dest = ip;
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
+ // aware of this descriptor and pick up values from it.
+ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
+ LoadP(ip, MemOperand(function, 0));
+ dest = ip;
+ } else if (ABI_CALL_VIA_IP) {
+ Move(ip, function);
+ dest = ip;
+ }
Call(dest);
@@ -3116,41 +3224,6 @@
}
-void MacroAssembler::FlushICache(Register address, size_t size,
- Register scratch) {
- if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
- sync();
- icbi(r0, address);
- isync();
- return;
- }
-
- Label done;
-
- dcbf(r0, address);
- sync();
- icbi(r0, address);
- isync();
-
- // This code handles ranges which cross a single cacheline boundary.
- // scratch is last cacheline which intersects range.
- const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
-
- DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
- addi(scratch, address, Operand(size - 1));
- ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
- cmpl(scratch, address);
- ble(&done);
-
- dcbf(r0, scratch);
- sync();
- icbi(r0, scratch);
- isync();
-
- bind(&done);
-}
-
-
void MacroAssembler::DecodeConstantPoolOffset(Register result,
Register location) {
Label overflow_access, done;
@@ -3386,7 +3459,8 @@
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = r8;
Register empty_fixed_array_value = r9;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -3400,6 +3474,7 @@
CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
beq(call_runtime);
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
b(&start);
bind(&next);
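Reviewer context for the InNewSpace hunk above: the check is now a MemoryChunk page-flag test rather than an address-mask compare against new_space_start. A minimal C++ sketch of the computation follows; the page mask and flag bit positions here are assumptions for illustration, not V8's real constants.
#include <cstdint>
// Sketch only: models the test CheckPageFlag emits for InNewSpace.
struct MemoryChunkSketch {
  uintptr_t flags;  // flag word at the start of each heap page
};
bool InNewSpaceSketch(uintptr_t object_address) {
  const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed
  const uintptr_t kInFromSpace = uintptr_t{1} << 3;               // assumed
  const uintptr_t kInToSpace = uintptr_t{1} << 4;                 // assumed
  // Masking the object address yields its page header, as ClearRightImm
  // does in the generated code.
  const auto* chunk = reinterpret_cast<const MemoryChunkSketch*>(
      object_address & ~kPageAlignmentMask);
  // JumpIfInNewSpace takes the branch when either semispace bit is set.
  return (chunk->flags & (kInFromSpace | kInToSpace)) != 0;
}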
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index 78de89a..d9dbd56 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -16,6 +16,7 @@
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r3};
const Register kReturnRegister1 = {Register::kCode_r4};
+const Register kReturnRegister2 = {Register::kCode_r5};
const Register kJSFunctionRegister = {Register::kCode_r4};
const Register kContextRegister = {Register::kCode_r30};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
@@ -146,6 +147,7 @@
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
+ void Drop(Register count, Register scratch = r0);
void Ret(int drop) {
Drop(drop);
@@ -161,6 +163,7 @@
}
// Register move. May do nothing if the registers are identical.
+ void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -200,13 +203,13 @@
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -248,6 +251,11 @@
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
@@ -341,6 +349,10 @@
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
+ // Restore the caller's frame pointer and return address before they are
+ // overwritten by tail call stack preparation.
+ void RestoreFrameStateForTailCall();
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -364,18 +376,20 @@
}
// Converts the integer (untagged smi) in |src| to a double, storing
- // the result to |double_dst|
- void ConvertIntToDouble(Register src, DoubleRegister double_dst);
+ // the result in |dst|
+ void ConvertIntToDouble(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
- // a double, storing the result to |double_dst|
- void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
+ // a double, storing the result in |dst|
+ void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|
- // Warning: The value in |int_scrach| will be changed in the process!
- void ConvertIntToFloat(const DoubleRegister dst, const Register src,
- const Register int_scratch);
+ void ConvertIntToFloat(Register src, DoubleRegister dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
#if V8_TARGET_ARCH_PPC64
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
@@ -858,6 +872,16 @@
void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
Register scratch2, DoubleRegister double_scratch);
+ // Check if a double is equal to -0.0.
+ // CR_EQ in cr7 holds the result.
+ void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
+ Register scratch2);
+
+ // Check the sign of a double.
+ // CR_LT in cr7 holds the result.
+ void TestDoubleSign(DoubleRegister input, Register scratch);
+ void TestHeapNumberSign(Register input, Register scratch);
+
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1004,10 +1028,6 @@
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1332,6 +1352,9 @@
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1446,9 +1469,9 @@
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
- // Expects object in r0 and returns map with validated enum cache
- // in r0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ // Expects object in r3 and returns map with validated enum cache
+ // in r3. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
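For context on the new TestDoubleIsMinusZero declared above: on PPC64 it rotates the raw double bits left by one and compares against 1. A portable C++ sketch of the same bit test, with illustrative names that are not V8 code:
#include <cstdint>
#include <cstring>
// Sketch only: -0.0 is the sole double whose raw bits are
// 0x8000000000000000, so a left rotate by one (rotldi) maps it,
// and only it, to 1.
bool IsMinusZeroSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint64_t rotated = (bits << 1) | (bits >> 63);
  return rotated == 1;
}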
diff --git a/src/ppc/simulator-ppc.cc b/src/ppc/simulator-ppc.cc
index 0efa660..9a1f9e0 100644
--- a/src/ppc/simulator-ppc.cc
+++ b/src/ppc/simulator-ppc.cc
@@ -15,6 +15,7 @@
#include "src/ppc/constants-ppc.h"
#include "src/ppc/frames-ppc.h"
#include "src/ppc/simulator-ppc.h"
+#include "src/runtime/runtime-utils.h"
#if defined(USE_SIMULATOR)
@@ -446,7 +447,8 @@
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
intptr_t value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
@@ -855,10 +857,19 @@
isolate->simulator_i_cache(),
reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
isolate->set_simulator_redirection(this);
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
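+ // Build a local function descriptor for the redirect: word 0 is the
+ // entry point (the swi instruction); the TOC and environment words are
+ // unused by the simulator and left zero.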
+ function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
+ function_descriptor_[1] = 0;
+ function_descriptor_[2] = 0;
+ }
}
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
+ void* address() {
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ return reinterpret_cast<void*>(function_descriptor_);
+ } else {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
}
void* external_function() { return external_function_; }
@@ -883,9 +894,16 @@
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static Redirection* FromAddress(void* address) {
+ int delta = ABI_USES_FUNCTION_DESCRIPTORS
+ ? offsetof(Redirection, function_descriptor_)
+ : offsetof(Redirection, swi_instruction_);
+ char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
static void* ReverseRedirection(intptr_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
return redirection->external_function();
}
@@ -902,6 +920,7 @@
uint32_t swi_instruction_;
ExternalReference::Type type_;
Redirection* next_;
+ intptr_t function_descriptor_[3];
};
@@ -922,7 +941,7 @@
void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
+ return redirection->address();
}
@@ -1171,20 +1190,11 @@
#if V8_TARGET_ARCH_PPC64
-struct ObjectPair {
- intptr_t x;
- intptr_t y;
-};
-
-
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
- *x = pair->x;
- *y = pair->y;
+ *x = reinterpret_cast<intptr_t>(pair->x);
+ *y = reinterpret_cast<intptr_t>(pair->y);
}
#else
-typedef uint64_t ObjectPair;
-
-
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
#if V8_TARGET_BIG_ENDIAN
*x = static_cast<int32_t>(*pair >> 32);
@@ -1196,16 +1206,17 @@
}
#endif
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in
-// runtime.cc uses the ObjectPair which is essentially two pointer
-// values stuffed into a structure. With the code below we assume that
-// all runtime calls return this pair. If they don't, the r4 result
-// register contains a bogus value, which is fine because it is
-// caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+// Calls into the V8 runtime.
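+// Single values come back in r3; pair and triple returns are decoded below
+// according to the ABI (in registers or through a hidden result buffer).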
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4,
+ intptr_t arg5);
// These prototypes handle the four types of FP calls.
typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -1237,13 +1248,15 @@
Redirection* redirection = Redirection::FromSwiInstruction(instr);
const int kArgCount = 6;
int arg0_regnum = 3;
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
intptr_t result_buffer = 0;
- if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
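+ // Triple results always go through a hidden result buffer; pair results
+ // do so only when the ABI cannot return them in registers. The buffer
+ // pointer arrives in r3, shifting the real arguments by one register.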
+ bool uses_result_buffer =
+ redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
+ (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
+ !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
+ if (uses_result_buffer) {
result_buffer = get_register(r3);
arg0_regnum++;
}
-#endif
intptr_t arg[kArgCount];
for (int i = 0; i < kArgCount; i++) {
arg[i] = get_register(arg0_regnum + i);
@@ -1389,9 +1402,9 @@
CHECK(stack_aligned);
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
-#if !ABI_PASSES_HANDLES_IN_REGS
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
-#endif
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ }
target(arg[0], arg[1]);
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
@@ -1408,9 +1421,9 @@
CHECK(stack_aligned);
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
-#if !ABI_PASSES_HANDLES_IN_REGS
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
-#endif
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ }
target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
} else {
// builtin call.
@@ -1430,19 +1443,53 @@
PrintF("\n");
}
CHECK(stack_aligned);
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- intptr_t x;
- intptr_t y;
- decodeObjectPair(&result, &x, &y);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ ObjectTriple result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ "}\n",
+ reinterpret_cast<intptr_t>(result.x),
+ reinterpret_cast<intptr_t>(result.y),
+ reinterpret_cast<intptr_t>(result.z));
+ }
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectTriple));
+ set_register(r3, result_buffer);
+ } else {
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+ SimulatorRuntimePairCall target =
+ reinterpret_cast<SimulatorRuntimePairCall>(external);
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+ }
+ if (ABI_RETURNS_OBJECT_PAIRS_IN_REGS) {
+ set_register(r3, x);
+ set_register(r4, y);
+ } else {
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectPair));
+ set_register(r3, result_buffer);
+ }
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ intptr_t result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR "\n", result);
+ }
+ set_register(r3, result);
+ }
}
- set_register(r3, x);
- set_register(r4, y);
}
set_pc(saved_lr);
break;
@@ -3852,17 +3899,19 @@
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
-// Prepare to execute the code at entry
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // entry is the function descriptor
- set_pc(*(reinterpret_cast<intptr_t*>(entry)));
-#else
- // entry is the instruction address
- set_pc(reinterpret_cast<intptr_t>(entry));
-#endif
+ // Prepare to execute the code at entry
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // entry is the function descriptor
+ set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+ } else {
+ // entry is the instruction address
+ set_pc(reinterpret_cast<intptr_t>(entry));
+ }
- // Put target address in ip (for JS prologue).
- set_register(r12, get_pc());
+ if (ABI_CALL_VIA_IP) {
+ // Put target address in ip (for JS prologue).
+ set_register(r12, get_pc());
+ }
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
@@ -3919,8 +3968,12 @@
Execute();
// Check that the non-volatile registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(r2));
- CHECK_EQ(callee_saved_value, get_register(r13));
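+ // Whichever of r2/r13 serves as the ABI's TOC register is legitimately
+ // modified across calls, so exempt it from the preservation check.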
+ if (ABI_TOC_REGISTER != 2) {
+ CHECK_EQ(callee_saved_value, get_register(r2));
+ }
+ if (ABI_TOC_REGISTER != 13) {
+ CHECK_EQ(callee_saved_value, get_register(r13));
+ }
CHECK_EQ(callee_saved_value, get_register(r14));
CHECK_EQ(callee_saved_value, get_register(r15));
CHECK_EQ(callee_saved_value, get_register(r16));