// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
#include "src/s390/code-stubs-s390.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
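  // The value in r3 is stored into the slot just past the last stack argument
  // (r2 holds the argument count), then r3 and r4 are pushed as extra
  // arguments and r2 is adjusted to cover them before tail-calling into the
  // runtime to build the array.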
  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
  __ StoreP(r3, MemOperand(sp, r1));
  __ push(r3);
  __ push(r4);
  __ AddP(r2, r2, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r2.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}

void DoubleToIStub::Generate(MacroAssembler* masm) {
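  // Truncate the double input to an int32: try the hardware conversion first
  // and only fall back to the manual exponent/mantissa decoding below when
  // the value does not fit in an int32.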
  Label out_of_range, only_low, negate, done, fastpath_done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();

  // Immediate values for this stub fit in instructions, so it's safe to use ip.
  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  DoubleRegister double_scratch = kScratchDoubleReg;

  __ push(scratch);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += kPointerSize;

  if (!skip_fastpath()) {
    // Load double input.
    __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset));

    // Do fast-path convert from double to int.
    __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_S390X
                            scratch,
#endif
                            result_reg, d0);

// Test for overflow
#if V8_TARGET_ARCH_S390X
    __ TestIfInt32(result_reg, r0);
#else
    __ TestIfInt32(scratch, result_reg, r0);
#endif
    __ beq(&fastpath_done, Label::kNear);
  }

  __ Push(scratch_high, scratch_low);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;

  __ LoadlW(scratch_high,
            MemOperand(input_reg, double_offset + Register::kExponentOffset));
  __ LoadlW(scratch_low,
            MemOperand(input_reg, double_offset + Register::kMantissaOffset));

  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is a *S390* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 less significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
  // the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ CmpP(scratch, Operand(83));
  __ bge(&out_of_range, Label::kNear);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ Load(r0, Operand(51));
  __ SubP(scratch, r0, scratch);
  __ CmpP(scratch, Operand::Zero());
  __ ble(&only_low, Label::kNear);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ ShiftRight(scratch_low, scratch_low, scratch);
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ Load(r0, Operand(32));
  __ SubP(scratch, r0, scratch);
  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
  // Set the implicit 1 before the mantissa part in scratch_high.
  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
  __ Load(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
  __ ShiftLeftP(r0, r0, Operand(16));
  __ OrP(result_reg, result_reg, r0);
  __ ShiftLeft(r0, result_reg, scratch);
  __ OrP(result_reg, scratch_low, r0);
  __ b(&negate, Label::kNear);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done, Label::kNear);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ LoadComplementRR(scratch, scratch);
  __ ShiftLeft(result_reg, scratch_low, scratch);

  __ bind(&negate);
  // If input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
  __ ShiftRightArith(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
  __ lgfr(r0, r0);
  __ ShiftRightP(r0, r0, Operand(32));
#endif
  __ XorP(result_reg, r0);
  __ ShiftRight(r0, scratch_high, Operand(31));
  __ AddP(result_reg, r0);

  __ bind(&done);
  __ Pop(scratch_high, scratch_low);

  __ bind(&fastpath_done);
  __ pop(scratch);

  __ Ret();
}

// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ CmpP(r2, r3);
  __ bne(&not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis, so neither of them is a
  // Smi. If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    // Call runtime on identical JSObjects.
    __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
    __ bge(slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ CmpP(r6, Operand(SYMBOL_TYPE));
    __ beq(slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
    __ beq(slow);
  } else {
    __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
    __ beq(&heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ CmpP(r6, Operand(FIRST_JS_RECEIVER_TYPE));
      __ bge(slow);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ CmpP(r6, Operand(SYMBOL_TYPE));
      __ beq(slow);
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
      __ beq(slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ CmpP(r6, Operand(ODDBALL_TYPE));
        __ bne(&return_equal);
        __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
        __ bne(&return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ LoadImmP(r2, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ LoadImmP(r2, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ LoadImmP(r2, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ LoadImmP(r2, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ LoadImmP(r2, Operand(EQUAL));  // Things are <=, >=, ==, === themselves
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ LoadlW(r4, FieldMemOperand(r2, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
    __ ExtractBitMask(r5, r4, HeapNumber::kExponentMask);
    __ CmpLogicalP(r5, Operand(0x7ff));
    __ bne(&return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(r4, Operand(HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ LoadlW(r5, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
    __ OrP(r2, r5, r4);
    __ CmpP(r2, Operand::Zero());
    // For equal we already have the right value in r2: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      Label not_equal;
      __ bne(&not_equal, Label::kNear);
      // All-zero means Infinity means equal.
      __ Ret();
      __ bind(&not_equal);
      if (cond == le) {
        __ LoadImmP(r2, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ LoadImmP(r2, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}

// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r5, r6, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed. Return non-equal.
    // If rhs is r2 then there is already a non zero value in it.
    Label skip;
    __ beq(&skip, Label::kNear);
    if (!rhs.is(r2)) {
      __ mov(r2, Operand(NOT_EQUAL));
    }
    __ Ret();
    __ bind(&skip);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ bne(slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r2, to d6.
  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ b(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r6, r6, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed. Return non-equal.
    // If lhs is r2 then there is already a non zero value in it.
    Label skip;
    __ beq(&skip, Label::kNear);
    if (!lhs.is(r2)) {
      __ mov(r2, Operand(NOT_EQUAL));
    }
    __ Ret();
    __ bind(&skip);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ bne(slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r3, to d7.
  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}

// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r4 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ CompareObjectType(rhs, r4, r4, FIRST_JS_RECEIVER_TYPE);
  __ blt(&first_non_object, Label::kNear);

  // Return non-zero (r2 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ CmpP(r4, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  __ CompareObjectType(lhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ bge(&return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ CmpP(r5, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ OrP(r4, r4, r5);
  __ AndP(r0, r4, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ beq(&return_not_equal);
}

// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers, Label* slow) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  __ CompareObjectType(rhs, r5, r4, HEAP_NUMBER_TYPE);
  __ bne(not_heap_numbers);
  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ CmpP(r4, r5);
  __ bne(slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  __ b(both_loaded_as_doubles);
}

// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  // r4 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ mov(r0, Operand(kIsNotStringMask));
  __ AndP(r0, r4);
  __ bne(&object_test, Label::kNear);
  __ mov(r0, Operand(kIsNotInternalizedMask));
  __ AndP(r0, r4);
  __ bne(possible_strings);
  __ CompareObjectType(lhs, r5, r5, FIRST_NONSTRING_TYPE);
  __ bge(runtime_call);
  __ mov(r0, Operand(kIsNotInternalizedMask));
  __ AndP(r0, r5);
  __ bne(possible_strings);

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in r2.
  __ Ret();

  __ bind(&object_test);
  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ LoadP(r5, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ LoadlB(r6, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ LoadlB(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
  __ AndP(r0, r6, Operand(1 << Map::kIsUndetectable));
  __ bne(&undetectable);
  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ bne(&return_unequal);

  __ CompareInstanceType(r4, r4, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);
  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in r2.
  __ Ret();

  __ bind(&undetectable);
  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ beq(&return_unequal);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ CompareInstanceType(r4, r4, ODDBALL_TYPE);
  __ beq(&return_equal);
  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
  __ bne(&return_unequal);

  __ bind(&return_equal);
  __ LoadImmP(r2, Operand(EQUAL));
  __ Ret();
}

static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
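  // For SMI, bail out to fail unless the input is a Smi; for NUMBER, accept a
  // Smi or a heap number. Other expected states fall through unchecked (see
  // the note below).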
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}

// On entry r3 and r2 are the values to be compared.
// On exit r2 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r3;
  Register rhs = r2;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, r4, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, r5, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

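  // Fast path: if both operands are Smis, untag them and produce the
  // (negative, zero, positive) result directly from a subtraction.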
  Label not_two_smis, smi_done;
  __ OrP(r4, r3, r2);
  __ JumpIfNotSmi(r4, &not_two_smis);
  __ SmiUntag(r3);
  __ SmiUntag(r2);
  __ SubP(r2, r3, r2);
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  __ AndP(r4, lhs, rhs);
  __ JumpIfNotSmi(r4, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded
  // into d7 and d6.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7
  __ bind(&lhs_not_nan);
  Label no_nan;
  __ cdbr(d7, d6);

  Label nan, equal, less_than;
  __ bunordered(&nan);
  __ beq(&equal, Label::kNear);
  __ blt(&less_than, Label::kNear);
  __ LoadImmP(r2, Operand(GREATER));
  __ Ret();
  __ bind(&equal);
  __ LoadImmP(r2, Operand(EQUAL));
  __ Ret();
  __ bind(&less_than);
  __ LoadImmP(r2, Operand(LESS));
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the v flag is set. Load r2 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
  if (cc == lt || cc == le) {
    __ LoadImmP(r2, Operand(GREATER));
  } else {
    __ LoadImmP(r2, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r2, r3, r4, r5 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r4 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r4 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
                                             &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r4, r5, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
                      r5);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r4, r5);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r4, r5, r6);
  }
  // Never falls through to here.

  __ bind(&slow);

  if (cc == eq) {
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
    __ SubP(r2, r2, r3);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ LoadSmiLiteral(r2, Smi::FromInt(ncr));
    __ push(r2);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | r14.bit());
  if (save_doubles()) {
    __ MultiPushDoubles(kCallerSavedDoubles);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = r3;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
                   argument_count);
  if (save_doubles()) {
    __ MultiPopDoubles(kCallerSavedDoubles);
  }
  __ MultiPop(kJSCallerSaved | r14.bit());
  __ Ret();
}

void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PushSafepointRegisters();
  __ b(r14);
}

void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PopSafepointRegisters();
  __ b(r14);
}

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = r3;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r4));
  const Register heapnumbermap = r7;
  const Register heapnumber = r2;
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
  const DoubleRegister double_scratch = d0;
  const Register scratch = r1;
  const Register scratch2 = r9;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
    __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ CmpP(scratch, heapnumbermap);
    __ bne(&call_runtime);

    __ LoadDouble(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ b(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
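    // The base was a Smi: scratch holds its untagged value, so convert that
    // integer to a double in double_base.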
    __ ConvertIntToDouble(scratch, double_base);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ CmpP(scratch, heapnumbermap);
    __ bne(&call_runtime);

    __ LoadDouble(double_exponent,
                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ LoadDouble(double_exponent,
                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as double.
    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
                             double_scratch);
    __ beq(&int_exponent, Label::kNear);

    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half, not_minus_inf1, not_minus_inf2;

      // Test for 0.5.
      __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
      __ cdbr(double_exponent, double_scratch);
      __ bne(&not_plus_half, Label::kNear);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
      __ cdbr(double_base, double_scratch);
      __ bne(&not_minus_inf1, Label::kNear);
      __ lcdbr(double_result, double_scratch);
      __ b(&done);
      __ bind(&not_minus_inf1);

      // Add +0 to convert -0 to +0.
      __ ldr(double_scratch, double_base);
      __ lzdr(kDoubleRegZero);
      __ adbr(double_scratch, kDoubleRegZero);
      __ sqdbr(double_result, double_scratch);
      __ b(&done);

      __ bind(&not_plus_half);
      __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
      __ cdbr(double_exponent, double_scratch);
      __ bne(&call_runtime);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
      __ cdbr(double_base, double_scratch);
      __ bne(&not_minus_inf2, Label::kNear);
      __ ldr(double_result, kDoubleRegZero);
      __ b(&done);
      __ bind(&not_minus_inf2);

      // Add +0 to convert -0 to +0.
      __ ldr(double_scratch, double_base);
      __ lzdr(kDoubleRegZero);
      __ adbr(double_scratch, kDoubleRegZero);
      __ LoadDoubleLiteral(double_result, 1.0, scratch);
      __ sqdbr(double_scratch, double_scratch);
      __ ddbr(double_result, double_scratch);
      __ b(&done);
    }

    __ push(r14);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r14);
    __ MovFromFloatResult(double_result);
    __ b(&done);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ LoadRR(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ LoadRR(exponent, scratch);
  }
  __ ldr(double_scratch, double_base);  // Back up base.
  __ LoadImmP(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ CmpP(scratch, Operand::Zero());
  __ bge(&positive_exponent, Label::kNear);
  __ LoadComplementRR(scratch, scratch);
  __ bind(&positive_exponent);

  Label while_true, no_carry, loop_end;
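  // Square-and-multiply loop: whenever the low bit of the exponent is set,
  // multiply the result by the current base; then square the base and shift
  // the exponent right until it reaches zero.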
  __ bind(&while_true);
  __ mov(scratch2, Operand(1));
  __ AndP(scratch2, scratch);
  __ beq(&no_carry, Label::kNear);
  __ mdbr(double_result, double_scratch);
  __ bind(&no_carry);
  __ ShiftRightArithP(scratch, scratch, Operand(1));
  __ beq(&loop_end, Label::kNear);
  __ mdbr(double_scratch, double_scratch);
  __ b(&while_true);
  __ bind(&loop_end);

  __ CmpP(exponent, Operand::Zero());
  __ bge(&done);

  // get 1/double_result:
  __ ldr(double_scratch, double_result);
  __ LoadImmP(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);
  __ ddbr(double_result, double_scratch);

  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ lzdr(kDoubleRegZero);
  __ cdbr(double_result, kDoubleRegZero);
  __ bne(&done, Label::kNear);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ ConvertIntToDouble(exponent, double_exponent);

  // Returning or bailing out.
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
                          &call_runtime);
    __ StoreDouble(double_result,
                   FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(r2));
    __ Ret(2);
  } else {
    __ push(r14);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r14);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ Ret();
  }
}

bool CEntryStub::NeedsImmovableCode() { return true; }

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}

void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}

void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}

void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}

void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}

void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r2: number of arguments including receiver
  // r3: pointer to builtin function
  // fp: frame pointer (restored after C call)
  // sp: stack pointer (restored as callee's sp after C call)
  // cp: current context (C callee-saved)
  //
  // If argv_in_register():
  // r4: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

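  // Preserve the builtin function pointer in r7 (C callee-saved); r3 is about
  // to be reused for the argv pointer.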
  __ LoadRR(r7, r3);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ LoadRR(r3, r4);
  } else {
    // Compute the argv pointer.
    __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
    __ lay(r3, MemOperand(r3, sp, -kPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);

  // Need at least one extra slot for return address location.
  int arg_stack_space = 1;

  // Pass buffer for return value on stack if necessary
  bool needs_return_buffer =
      result_size() > 2 ||
      (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS);
  if (needs_return_buffer) {
    arg_stack_space += result_size();
  }

#if V8_TARGET_ARCH_S390X
  // 64-bit Linux passes the Argument object by reference, not by value.
  arg_stack_space += 2;
#endif

  __ EnterExitFrame(save_doubles(), arg_stack_space);

  // Store a copy of argc, argv in callee-saved registers for later.
  __ LoadRR(r6, r2);
  __ LoadRR(r8, r3);
  // r2, r6: number of arguments including receiver (C callee-saved)
  // r3, r8: pointer to the first argument
  // r7: pointer to builtin function (C callee-saved)

  // Result returned in registers or stack, depending on result size and ABI.

  Register isolate_reg = r4;
  if (needs_return_buffer) {
    // The return value is a 16-byte non-scalar value.
    // Use frame storage reserved by calling function to pass return
    // buffer as implicit first argument in R2. Shift original parameters
    // by one register each.
    __ LoadRR(r4, r3);
    __ LoadRR(r3, r2);
    __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
    isolate_reg = r5;
  }
  // Call C built-in.
  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));

  Register target = r7;

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  {
    Label return_label;
    __ larl(r14, &return_label);  // Generate the return addr of call later.
    __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));

    // zLinux ABI requires caller's frame to have sufficient space for callee
    // preserved register save area.
    // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
    __ b(target);
    __ bind(&return_label);
    // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
  }

  // If return value is on the stack, pop it to registers.
  if (needs_return_buffer) {
    if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize));
    __ LoadP(r3, MemOperand(r2, kPointerSize));
    __ LoadP(r2, MemOperand(r2));
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r2, Heap::kExceptionRootIndex);
  __ beq(&exception_returned, Label::kNear);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ mov(r1, Operand(pending_exception_address));
    __ LoadP(r1, MemOperand(r1));
    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ beq(&okay, Label::kNear);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r2:r3: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // r6: still holds argc (callee-saved).
    argc = r6;
  }
  __ LeaveExitFrame(save_doubles(), argc, true);
  __ b(r14);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set r3 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r2);
    __ LoadImmP(r2, Operand::Zero());
    __ LoadImmP(r3, Operand::Zero());
    __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(cp, Operand(pending_handler_context_address));
  __ LoadP(cp, MemOperand(cp));
  __ mov(sp, Operand(pending_handler_sp_address));
  __ LoadP(sp, MemOperand(sp));
  __ mov(fp, Operand(pending_handler_fp_address));
  __ LoadP(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label skip;
  __ CmpP(cp, Operand::Zero());
  __ beq(&skip, Label::kNear);
  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

  // Compute the handler entry address and jump to it.
  __ mov(r3, Operand(pending_handler_code_address));
  __ LoadP(r3, MemOperand(r3));
  __ mov(r4, Operand(pending_handler_offset_address));
  __ LoadP(r4, MemOperand(r4));
  __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
  __ AddP(ip, r3, r4);
  __ Jump(ip);
}

void JSEntryStub::Generate(MacroAssembler* masm) {
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

// saving floating point registers
#if V8_TARGET_ARCH_S390X
  // 64bit ABI requires f8 to f15 to be saved
  __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
  __ std(d8, MemOperand(sp));
  __ std(d9, MemOperand(sp, 1 * kDoubleSize));
  __ std(d10, MemOperand(sp, 2 * kDoubleSize));
  __ std(d11, MemOperand(sp, 3 * kDoubleSize));
  __ std(d12, MemOperand(sp, 4 * kDoubleSize));
  __ std(d13, MemOperand(sp, 5 * kDoubleSize));
  __ std(d14, MemOperand(sp, 6 * kDoubleSize));
  __ std(d15, MemOperand(sp, 7 * kDoubleSize));
#else
  // 31bit ABI requires you to store f4 and f6:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
  __ std(d4, MemOperand(sp));
  __ std(d6, MemOperand(sp, kDoubleSize));
#endif

  // zLinux ABI
  // Incoming parameters:
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv
  // Requires us to save the callee-preserved registers r6-r13
  // General convention is to also save r14 (return addr) and
  // sp/r15 as well in a single STM/STMG
  __ lay(sp, MemOperand(sp, -10 * kPointerSize));
  __ StoreMultipleP(r6, sp, MemOperand(sp, 0));

  // Set up the reserved register for 0.0.
  // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);

  // Push a frame with special values setup to mark it as an entry frame.
  // Bad FP (-1)
  // SMI Marker
  // SMI Marker
  // kCEntryFPAddress
  // Frame type
  __ lay(sp, MemOperand(sp, -5 * kPointerSize));
  // Push a bad frame pointer to fail if it is used.
  __ LoadImmP(r10, Operand(-1));

  int marker = type();
  __ LoadSmiLiteral(r9, Smi::FromInt(marker));
  __ LoadSmiLiteral(r8, Smi::FromInt(marker));
  // Save copies of the top frame descriptor on the stack.
  __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ LoadP(r7, MemOperand(r7));
  __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
  // Set up frame pointer for the frame to be pushed.
  // Need to add kPointerSize, because sp has one extra
  // frame already for the frame type being pushed later.
  __ lay(fp,
         MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
  __ LoadAndTestP(r8, MemOperand(r7));
  __ bne(&non_outermost_js, Label::kNear);
  __ StoreP(fp, MemOperand(r7));
  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont, Label::kNear);
  __ bind(&non_outermost_js);
  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));

  __ bind(&cont);
  __ StoreP(ip, MemOperand(sp));  // frame-type

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ b(&invoke, Label::kNear);

  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel. Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));

  __ StoreP(r2, MemOperand(ip));
  __ LoadRoot(r2, Heap::kExceptionRootIndex);
  __ b(&exit, Label::kNear);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r2-r6.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the b(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));
  __ mov(r7, Operand(isolate()->factory()->the_hole_value()));
  __ StoreP(r7, MemOperand(ip));

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ LoadP(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline.
  // the address points to the start of the code object, skip the header
  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
  Label return_addr;
  // __ basr(r14, ip);
  __ larl(r14, &return_addr);
  __ b(ip);
  __ bind(&return_addr);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r2 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r7);
  __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
  __ bne(&non_outermost_js_2, Label::kNear);
  __ mov(r8, Operand::Zero());
  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
  __ StoreP(r8, MemOperand(r7));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r5);
  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ StoreP(r5, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));

  // Reload callee-saved preserved regs, return address reg (r14) and sp
  __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
  __ la(sp, MemOperand(sp, 10 * kPointerSize));

// restoring floating point registers
#if V8_TARGET_ARCH_S390X
  // 64bit ABI requires f8 to f15 to be restored
  __ ld(d8, MemOperand(sp));
  __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
  __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
  __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
  __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
  __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
  __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
  __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
  __ la(sp, MemOperand(sp, 8 * kDoubleSize));
#else
  // 31bit ABI requires you to restore f4 and f6:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ ld(d4, MemOperand(sp));
  __ ld(d6, MemOperand(sp, kDoubleSize));
  __ la(sp, MemOperand(sp, 2 * kDoubleSize));
#endif

  __ b(r14);
}

void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
                                                          r7, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}

void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in r14.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r7;
  Register result = r2;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
         result.is(LoadWithVectorDescriptor::SlotRegister()));

  // StringCharAtGenerator doesn't use the result register until it's passed
  // the different miss possibilities. If it did, we would have a conflict
  // when FLAG_vector_ics is true.
  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
1379
1380void RegExpExecStub::Generate(MacroAssembler* masm) {
1381// Just jump directly to runtime if native RegExp is not selected at compile
1382// time or if regexp entry in generated code is turned off runtime switch or
1383// at compilation.
1384#ifdef V8_INTERPRETED_REGEXP
1385 __ TailCallRuntime(Runtime::kRegExpExec);
1386#else // V8_INTERPRETED_REGEXP
1387
1388 // Stack frame on entry.
1389 // sp[0]: last_match_info (expected JSArray)
1390 // sp[4]: previous index
1391 // sp[8]: subject string
1392 // sp[12]: JSRegExp object
1393
1394 const int kLastMatchInfoOffset = 0 * kPointerSize;
1395 const int kPreviousIndexOffset = 1 * kPointerSize;
1396 const int kSubjectOffset = 2 * kPointerSize;
1397 const int kJSRegExpOffset = 3 * kPointerSize;
1398
1399 Label runtime, br_over, encoding_type_UC16;
1400
1401 // Allocation of registers for this function. These are in callee save
1402 // registers and will be preserved by the call to the native RegExp code, as
1403 // this code is called using the normal C calling convention. When calling
1404 // directly from generated code the native RegExp code will not do a GC and
1405 // therefore the content of these registers are safe to use after the call.
1406 Register subject = r6;
1407 Register regexp_data = r7;
1408 Register last_match_info_elements = r8;
1409 Register code = r9;
1410
1411 __ CleanseP(r14);
1412
1413 // Ensure register assigments are consistent with callee save masks
1414 DCHECK(subject.bit() & kCalleeSaved);
1415 DCHECK(regexp_data.bit() & kCalleeSaved);
1416 DCHECK(last_match_info_elements.bit() & kCalleeSaved);
1417 DCHECK(code.bit() & kCalleeSaved);
1418
1419 // Ensure that a RegExp stack is allocated.
1420 ExternalReference address_of_regexp_stack_memory_address =
1421 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1422 ExternalReference address_of_regexp_stack_memory_size =
1423 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1424 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
1425 __ LoadAndTestP(r2, MemOperand(r2));
1426 __ beq(&runtime);
1427
1428 // Check that the first argument is a JSRegExp object.
1429 __ LoadP(r2, MemOperand(sp, kJSRegExpOffset));
1430 __ JumpIfSmi(r2, &runtime);
1431 __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
1432 __ bne(&runtime);
1433
1434 // Check that the RegExp has been compiled (data contains a fixed array).
1435 __ LoadP(regexp_data, FieldMemOperand(r2, JSRegExp::kDataOffset));
1436 if (FLAG_debug_code) {
1437 __ TestIfSmi(regexp_data);
1438 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
1439 __ CompareObjectType(regexp_data, r2, r2, FIXED_ARRAY_TYPE);
1440 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1441 }
1442
1443 // regexp_data: RegExp data (FixedArray)
1444 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1445 __ LoadP(r2, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1446 // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
1447 __ CmpSmiLiteral(r2, Smi::FromInt(JSRegExp::IRREGEXP), r0);
1448 __ bne(&runtime);
1449
1450 // regexp_data: RegExp data (FixedArray)
1451 // Check that the number of captures fit in the static offsets vector buffer.
1452 __ LoadP(r4,
1453 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1454 // Check (number_of_captures + 1) * 2 <= offsets vector size
1455 // Or number_of_captures * 2 <= offsets vector size - 2
1456 // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1457 // SmiUntag (which is a nop for 32-bit).
1458 __ SmiToShortArrayOffset(r4, r4);
1459 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1460 __ CmpLogicalP(r4, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1461 __ bgt(&runtime);
1462
1463 // Reset offset for possibly sliced string.
1464 __ LoadImmP(ip, Operand::Zero());
1465 __ LoadP(subject, MemOperand(sp, kSubjectOffset));
1466 __ JumpIfSmi(subject, &runtime);
1467 __ LoadRR(r5, subject); // Make a copy of the original subject string.
1468 // subject: subject string
1469 // r5: subject string
1470 // regexp_data: RegExp data (FixedArray)
1471 // Handle subject string according to its encoding and representation:
1472 // (1) Sequential string? If yes, go to (4).
1473 // (2) Sequential or cons? If not, go to (5).
1474 // (3) Cons string. If the string is flat, replace subject with first string
1475 // and go to (1). Otherwise bail out to runtime.
1476 // (4) Sequential string. Load regexp code according to encoding.
1477 // (E) Carry on.
1478 /// [...]
1479
1480 // Deferred code at the end of the stub:
1481 // (5) Long external string? If not, go to (7).
1482 // (6) External string. Make it, offset-wise, look like a sequential string.
1483 // Go to (4).
1484 // (7) Short external string or not a string? If yes, bail out to runtime.
1485 // (8) Sliced string. Replace subject with parent. Go to (1).
1486
1487 Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
1488 not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
1489
1490 __ bind(&check_underlying);
1491 __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
1492 __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
1493
1494 // (1) Sequential string? If yes, go to (4).
1495
1496 STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
1497 kShortExternalStringMask) == 0x93);
1498 __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
1499 kShortExternalStringMask));
1500 __ AndP(r3, r2);
1501 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1502 __ beq(&seq_string, Label::kNear); // Go to (4).
1503
1504 // (2) Sequential or cons? If not, go to (5).
1505 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1506 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1507 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1508 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1509 STATIC_ASSERT(kExternalStringTag < 0xffffu);
1510 __ CmpP(r3, Operand(kExternalStringTag));
1511 __ bge(&not_seq_nor_cons); // Go to (5).
1512
1513 // (3) Cons string. Check that it's flat.
1514 // Replace subject with first string and reload instance type.
1515 __ LoadP(r2, FieldMemOperand(subject, ConsString::kSecondOffset));
1516 __ CompareRoot(r2, Heap::kempty_stringRootIndex);
1517 __ bne(&runtime);
1518 __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1519 __ b(&check_underlying);
1520
1521 // (4) Sequential string. Load regexp code according to encoding.
1522 __ bind(&seq_string);
1523 // subject: sequential subject string (or look-alike, external string)
1524 // r5: original subject string
1525 // Load previous index and check range before r5 is overwritten. We have to
1526 // use r5 instead of subject here because subject might have been only made
1527 // to look like a sequential string when it actually is an external string.
1528 __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset));
1529 __ JumpIfNotSmi(r3, &runtime);
1530 __ LoadP(r5, FieldMemOperand(r5, String::kLengthOffset));
1531 __ CmpLogicalP(r5, r3);
1532 __ ble(&runtime);
1533 __ SmiUntag(r3);
1534
1535 STATIC_ASSERT(4 == kOneByteStringTag);
1536 STATIC_ASSERT(kTwoByteStringTag == 0);
1537 STATIC_ASSERT(kStringEncodingMask == 4);
1538 __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
1539 __ beq(&encoding_type_UC16, Label::kNear);
1540 __ LoadP(code,
1541 FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1542 __ b(&br_over, Label::kNear);
1543 __ bind(&encoding_type_UC16);
1544 __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
1545 __ bind(&br_over);
1546
1547 // (E) Carry on. String handling is done.
1548 // code: irregexp code
1549 // Check that the irregexp code has been generated for the actual string
1550 // encoding. If it has, the field contains a code object; otherwise it contains
1551 // a smi (code flushing support).
1552 __ JumpIfSmi(code, &runtime);
1553
1554 // r3: previous index
1555 // r5: encoding of subject string (1 if one_byte, 0 if two_byte);
1556 // code: Address of generated regexp code
1557 // subject: Subject string
1558 // regexp_data: RegExp data (FixedArray)
1559 // All checks done. Now push arguments for native regexp code.
1560 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r2, r4);
1561
1562 // Isolates: note we add an additional parameter here (isolate pointer).
1563 const int kRegExpExecuteArguments = 10;
1564 const int kParameterRegisters = 5;
1565 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1566
1567 // Stack pointer now points to cell where return address is to be written.
1568 // Arguments are before that on the stack or in registers.
1569
1570 // Argument 10 (in stack parameter area): Pass current isolate address.
1571 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
1572 __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1573 4 * kPointerSize));
1574
1575 // Argument 9 is a dummy that reserves the space used for
1576 // the return address added by the ExitFrame in native calls.
1577 __ mov(r2, Operand::Zero());
1578 __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1579 3 * kPointerSize));
1580
1581 // Argument 8: Indicate that this is a direct call from JavaScript.
1582 __ mov(r2, Operand(1));
1583 __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1584 2 * kPointerSize));
1585
1586 // Argument 7: Start (high end) of backtracking stack memory area.
1587 __ mov(r2, Operand(address_of_regexp_stack_memory_address));
1588 __ LoadP(r2, MemOperand(r2, 0));
1589 __ mov(r1, Operand(address_of_regexp_stack_memory_size));
1590 __ LoadP(r1, MemOperand(r1, 0));
1591 __ AddP(r2, r1);
1592 __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1593 1 * kPointerSize));
1594
1595 // Argument 6: Set the number of capture registers to zero to force
1596 // global regexps to behave as non-global. This does not affect non-global
1597 // regexps.
1598 __ mov(r2, Operand::Zero());
1599 __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
1600 0 * kPointerSize));
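  // Illustrative note (not from the original source): arguments 6 through 10
  // are passed in the extra parameter area of the exit frame; argument N is
  // stored at kStackFrameExtraParamSlot * kPointerSize + (N - 6) * kPointerSize
  // from sp, so the isolate pointer (argument 10) sits four pointer slots
  // above the zeroed capture-register count (argument 6).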
1601
1602 // Argument 1 (r2): Subject string.
1603 // Load the length from the original subject string from the previous stack
1604 // frame. Therefore we have to use fp, which points exactly to 15 pointer
1605 // sizes below the previous sp. (Creating a new stack frame pushes the
1606 // previous fp onto the stack, moves sp by 2 * kPointerSize, and 13
1607 // registers were saved on the stack previously.)
1608 __ LoadP(r2, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1609
1610 // Argument 2 (r3): Previous index.
1611 // Already there
1612 __ AddP(r1, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1613
1614 // Argument 5 (r6): static offsets vector buffer.
1615 __ mov(
1616 r6,
1617 Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
1618
1619 // For arguments 4 (r5) and 3 (r4) get string length, calculate start of data
1620 // and calculate the shift of the index (0 for one-byte and 1 for two byte).
1621 __ XorP(r5, Operand(1));
1622 // If slice offset is not 0, load the length from the original sliced string.
1623 // Argument 3, r4: Start of string data
1624 // Prepare start and end index of the input.
1625 __ ShiftLeftP(ip, ip, r5);
1626 __ AddP(ip, r1, ip);
1627 __ ShiftLeftP(r4, r3, r5);
1628 __ AddP(r4, ip, r4);
1629
1630 // Argument 4, r5: End of string data
1631 __ LoadP(r1, FieldMemOperand(r2, String::kLengthOffset));
1632 __ SmiUntag(r1);
1633 __ ShiftLeftP(r0, r1, r5);
1634 __ AddP(r5, ip, r0);
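  // Illustrative note (not from the original source): r5 was turned into the
  // character-size shift above (0 for one-byte, 1 for two-byte), r1 pointed at
  // the first character of the sequential-looking subject, and ip folded in
  // the slice offset. For a two-byte subject with slice offset 0, previous
  // index 3 and length 10, argument 3 (r4) ends up at the first character
  // plus 6 bytes and argument 4 (r5) at the first character plus 20 bytes.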
1635
1636 // Locate the code entry and call it.
1637 __ AddP(code, Operand(Code::kHeaderSize - kHeapObjectTag));
1638
1639 DirectCEntryStub stub(isolate());
1640 stub.GenerateCall(masm, code);
1641
1642 __ LeaveExitFrame(false, no_reg, true);
1643
1644 // r2: result (int32)
1645 // subject: subject string -- needed to reload
1646 __ LoadP(subject, MemOperand(sp, kSubjectOffset));
1647
1648 // regexp_data: RegExp data (callee saved)
1649 // last_match_info_elements: Last match info elements (callee saved)
1650 // Check the result.
1651 Label success;
1652 __ Cmp32(r2, Operand(1));
1653 // We expect exactly one result since we force the called regexp to behave
1654 // as non-global.
1655 __ beq(&success);
1656 Label failure;
1657 __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::FAILURE));
1658 __ beq(&failure);
1659 __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1660 // If not exception it can only be retry. Handle that in the runtime system.
1661 __ bne(&runtime);
1662 // Result must now be exception. If there is no pending exception already, a
1663 // stack overflow (on the backtrack stack) was detected in RegExp code, but
1664 // the exception has not been created yet. Handle that in the runtime system.
1665 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1666 __ mov(r3, Operand(isolate()->factory()->the_hole_value()));
1667 __ mov(r4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1668 isolate())));
1669 __ LoadP(r2, MemOperand(r4, 0));
1670 __ CmpP(r2, r3);
1671 __ beq(&runtime);
1672
1673 // For exception, throw the exception again.
1674 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1675
1676 __ bind(&failure);
1677 // For failure and exception return null.
1678 __ mov(r2, Operand(isolate()->factory()->null_value()));
1679 __ la(sp, MemOperand(sp, (4 * kPointerSize)));
1680 __ Ret();
1681
1682 // Process the result from the native regexp code.
1683 __ bind(&success);
1684 __ LoadP(r3,
1685 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1686 // Calculate number of capture registers (number_of_captures + 1) * 2.
1687 // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1688 // SmiUntag (which is a nop for 32-bit).
1689 __ SmiToShortArrayOffset(r3, r3);
1690 __ AddP(r3, Operand(2));
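  // Illustrative note (not part of the original stub): for a pattern such as
  // /(a)(b)/ the capture count is 2, so r3 becomes 2 * 2 + 2 = 6 offsets:
  // start and end of the whole match plus start and end of each group.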
1691
1692 __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
1693 __ JumpIfSmi(r2, &runtime);
1694 __ CompareObjectType(r2, r4, r4, JS_ARRAY_TYPE);
1695 __ bne(&runtime);
1696 // Check that the JSArray is in fast case.
1697 __ LoadP(last_match_info_elements,
1698 FieldMemOperand(r2, JSArray::kElementsOffset));
1699 __ LoadP(r2,
1700 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1701 __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
1702 __ bne(&runtime);
1703 // Check that the last match info has space for the capture registers and the
1704 // additional information.
1705 __ LoadP(
1706 r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1707 __ AddP(r4, r3, Operand(RegExpImpl::kLastMatchOverhead));
1708 __ SmiUntag(r0, r2);
1709 __ CmpP(r4, r0);
1710 __ bgt(&runtime);
1711
1712 // r3: number of capture registers
1713 // subject: subject string
1714 // Store the capture count.
1715 __ SmiTag(r4, r3);
1716 __ StoreP(r4, FieldMemOperand(last_match_info_elements,
1717 RegExpImpl::kLastCaptureCountOffset));
1718 // Store last subject and last input.
1719 __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1720 RegExpImpl::kLastSubjectOffset));
1721 __ LoadRR(r4, subject);
1722 __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
1723 subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
1724 __ LoadRR(subject, r4);
1725 __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1726 RegExpImpl::kLastInputOffset));
1727 __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
1728 subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
1729
1730 // Get the static offsets vector filled by the native regexp code.
1731 ExternalReference address_of_static_offsets_vector =
1732 ExternalReference::address_of_static_offsets_vector(isolate());
1733 __ mov(r4, Operand(address_of_static_offsets_vector));
1734
1735 // r3: number of capture registers
1736 // r4: offsets vector
1737 Label next_capture;
1738 // Capture register counter starts from number of capture registers and
1739 // counts down until wrapping after zero.
1740 __ AddP(
1741 r2, last_match_info_elements,
1742 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
1743 __ AddP(r4, Operand(-kIntSize)); // bias down for lwzu
1744 __ bind(&next_capture);
1745 // Read the value from the static offsets vector buffer.
1746 __ ly(r5, MemOperand(r4, kIntSize));
1747 __ lay(r4, MemOperand(r4, kIntSize));
1748 // Store the smi value in the last match info.
1749 __ SmiTag(r5);
1750 __ StoreP(r5, MemOperand(r2, kPointerSize));
1751 __ lay(r2, MemOperand(r2, kPointerSize));
1752 __ BranchOnCount(r3, &next_capture);
1753
1754 // Return last match info.
1755 __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
1756 __ la(sp, MemOperand(sp, (4 * kPointerSize)));
1757 __ Ret();
1758
1759 // Do the runtime call to execute the regexp.
1760 __ bind(&runtime);
1761 __ TailCallRuntime(Runtime::kRegExpExec);
1762
1763 // Deferred code for string handling.
1764 // (5) Long external string? If not, go to (7).
1765 __ bind(&not_seq_nor_cons);
1766 // Compare flags are still set.
1767 __ bgt(&not_long_external, Label::kNear); // Go to (7).
1768
1769 // (6) External string. Make it, offset-wise, look like a sequential string.
1770 __ bind(&external_string);
1771 __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
1772 __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
1773 if (FLAG_debug_code) {
1774 // Assert that we do not have a cons or slice (indirect strings) here.
1775 // Sequential strings have already been ruled out.
1776 STATIC_ASSERT(kIsIndirectStringMask == 1);
1777 __ tmll(r2, Operand(kIsIndirectStringMask));
1778 __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
1779 }
1780 __ LoadP(subject,
1781 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1782 // Move the pointer so that offset-wise, it looks like a sequential string.
1783 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1784 __ SubP(subject, subject,
1785 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1786 __ b(&seq_string); // Go to (4).
1787
1788 // (7) Short external string or not a string? If yes, bail out to runtime.
1789 __ bind(&not_long_external);
1790 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1791 __ mov(r0, Operand(kIsNotStringMask | kShortExternalStringMask));
1792 __ AndP(r0, r3);
1793 __ bne(&runtime);
1794
1795 // (8) Sliced string. Replace subject with parent. Go to (1).
1796 // Load offset into ip and replace subject string with parent.
1797 __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1798 __ SmiUntag(ip);
1799 __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1800 __ b(&check_underlying); // Go to (1).
1801#endif // V8_INTERPRETED_REGEXP
1802}
1803
1804static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1805 // r2 : number of arguments to the construct function
1806 // r3 : the function to call
1807 // r4 : feedback vector
1808 // r5 : slot in feedback vector (Smi)
1809 FrameScope scope(masm, StackFrame::INTERNAL);
1810
1811 // Number-of-arguments register must be smi-tagged to call out.
1812 __ SmiTag(r2);
1813 __ Push(r5, r4, r3, r2);
1814
1815 __ CallStub(stub);
1816
1817 __ Pop(r5, r4, r3, r2);
1818 __ SmiUntag(r2);
1819}
1820
1821static void GenerateRecordCallTarget(MacroAssembler* masm) {
1822 // Cache the called function in a feedback vector slot. Cache states
1823 // are uninitialized, monomorphic (indicated by a JSFunction), and
1824 // megamorphic.
1825 // r2 : number of arguments to the construct function
1826 // r3 : the function to call
1827 // r4 : feedback vector
1828 // r5 : slot in feedback vector (Smi)
1829 Label initialize, done, miss, megamorphic, not_array_function;
1830 Label done_initialize_count, done_increment_count;
1831
1832 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1833 masm->isolate()->heap()->megamorphic_symbol());
1834 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1835 masm->isolate()->heap()->uninitialized_symbol());
1836
1837 const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1838
1839 // Load the cache state into r7.
1840 __ SmiToPtrArrayOffset(r7, r5);
1841 __ AddP(r7, r4, r7);
1842 __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
1843
1844 // A monomorphic cache hit or an already megamorphic state: invoke the
1845 // function without changing the state.
1846 // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
1847 // this position in a symbol (see static asserts in type-feedback-vector.h).
1848 Label check_allocation_site;
1849 Register feedback_map = r8;
1850 Register weak_value = r9;
1851 __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
1852 __ CmpP(r3, weak_value);
1853 __ beq(&done_increment_count, Label::kNear);
1854 __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
1855 __ beq(&done, Label::kNear);
1856 __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
1857 __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1858 __ bne(&check_allocation_site);
1859
1860 // If the weak cell is cleared, we have a new chance to become monomorphic.
1861 __ JumpIfSmi(weak_value, &initialize);
1862 __ b(&megamorphic);
1863
1864 __ bind(&check_allocation_site);
1865 // If we came here, we need to see if we are the array function.
1866 // If we didn't have a matching function, and we didn't find the megamorphic
1867 // sentinel, then we have in the slot either some other function or an
1868 // AllocationSite.
1869 __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
1870 __ bne(&miss);
1871
1872 // Make sure the function is the Array() function
1873 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1874 __ CmpP(r3, r7);
1875 __ bne(&megamorphic);
1876 __ b(&done_increment_count, Label::kNear);
1877
1878 __ bind(&miss);
1879
1880 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
1881 // megamorphic.
1882 __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
1883 __ beq(&initialize);
1884 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1885 // write-barrier is needed.
1886 __ bind(&megamorphic);
1887 __ SmiToPtrArrayOffset(r7, r5);
1888 __ AddP(r7, r4, r7);
1889 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1890 __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
1891 __ jmp(&done);
1892
1893 // An uninitialized cache is patched with the function
1894 __ bind(&initialize);
1895
1896 // Make sure the function is the Array() function.
1897 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1898 __ CmpP(r3, r7);
1899 __ bne(&not_array_function);
1900
1901 // The target function is the Array constructor,
1902 // Create an AllocationSite if we don't already have it, store it in the
1903 // slot.
1904 CreateAllocationSiteStub create_stub(masm->isolate());
1905 CallStubInRecordCallTarget(masm, &create_stub);
1906 __ b(&done_initialize_count, Label::kNear);
1907
1908 __ bind(&not_array_function);
1909
1910 CreateWeakCellStub weak_cell_stub(masm->isolate());
1911 CallStubInRecordCallTarget(masm, &weak_cell_stub);
1912
1913 __ bind(&done_initialize_count);
1914 // Initialize the call counter.
1915 __ LoadSmiLiteral(r7, Smi::FromInt(1));
1916 __ SmiToPtrArrayOffset(r6, r5);
1917 __ AddP(r6, r4, r6);
1918 __ StoreP(r7, FieldMemOperand(r6, count_offset), r0);
1919 __ b(&done, Label::kNear);
1920
1921 __ bind(&done_increment_count);
1922
1923 // Increment the call count for monomorphic function calls.
1924 __ SmiToPtrArrayOffset(r7, r5);
1925 __ AddP(r7, r4, r7);
1926
1927 __ LoadP(r6, FieldMemOperand(r7, count_offset));
1928 __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
1929 __ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
1930
1931 __ bind(&done);
1932}
1933
1934void CallConstructStub::Generate(MacroAssembler* masm) {
1935 // r2 : number of arguments
1936 // r3 : the function to call
1937 // r4 : feedback vector
1938 // r5 : slot in feedback vector (Smi, for RecordCallTarget)
1939
1940 Label non_function;
1941 // Check that the function is not a smi.
1942 __ JumpIfSmi(r3, &non_function);
1943 // Check that the function is a JSFunction.
1944 __ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
1945 __ bne(&non_function);
1946
1947 GenerateRecordCallTarget(masm);
1948
1949 __ SmiToPtrArrayOffset(r7, r5);
1950 __ AddP(r7, r4, r7);
1951 // Put the AllocationSite from the feedback vector into r4, or undefined.
1952 __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
1953 __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
1954 __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
1955 Label feedback_register_initialized;
1956 __ beq(&feedback_register_initialized);
1957 __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
1958 __ bind(&feedback_register_initialized);
1959
1960 __ AssertUndefinedOrAllocationSite(r4, r7);
1961
1962 // Pass function as new target.
1963 __ LoadRR(r5, r3);
1964
1965 // Tail call to the function-specific construct stub (still in the caller
1966 // context at this point).
1967 __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
1968 __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
1969 __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
1970 __ JumpToJSEntry(ip);
1971
1972 __ bind(&non_function);
1973 __ LoadRR(r5, r3);
1974 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1975}
1976
1977void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1978 // r3 - function
1979 // r5 - slot id
1980 // r4 - vector
1981 // r6 - allocation site (loaded from vector[slot])
1982 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
1983 __ CmpP(r3, r7);
1984 __ bne(miss);
1985
1986 __ mov(r2, Operand(arg_count()));
1987
1988 // Increment the call count for monomorphic function calls.
1989 const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1990 __ SmiToPtrArrayOffset(r7, r5);
1991 __ AddP(r4, r4, r7);
1992 __ LoadP(r5, FieldMemOperand(r4, count_offset));
1993 __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
1994 __ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
1995
1996 __ LoadRR(r4, r6);
1997 __ LoadRR(r5, r3);
1998 ArrayConstructorStub stub(masm->isolate(), arg_count());
1999 __ TailCallStub(&stub);
2000}
2001
2002void CallICStub::Generate(MacroAssembler* masm) {
2003 // r3 - function
2004 // r5 - slot id (Smi)
2005 // r4 - vector
2006 Label extra_checks_or_miss, call, call_function;
2007 int argc = arg_count();
2008 ParameterCount actual(argc);
2009
2010 // The checks. First, does r3 match the recorded monomorphic target?
2011 __ SmiToPtrArrayOffset(r8, r5);
2012 __ AddP(r8, r4, r8);
2013 __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize));
2014
2015 // We don't know that we have a weak cell. We might have a private symbol
2016 // or an AllocationSite, but the memory is safe to examine.
2017 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2018 // FixedArray.
2019 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2020 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2021 // computed, meaning that it can't appear to be a pointer. If the low bit is
2022 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2023 // to be a pointer.
2024 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2025 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2026 WeakCell::kValueOffset &&
2027 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2028
2029 __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset));
2030 __ CmpP(r3, r7);
2031 __ bne(&extra_checks_or_miss, Label::kNear);
2032
2033 // The compare above could have been a SMI/SMI comparison. Guard against this
2034 // convincing us that we have a monomorphic JSFunction.
2035 __ JumpIfSmi(r3, &extra_checks_or_miss);
2036
2037 // Increment the call count for monomorphic function calls.
2038 const int count_offset = FixedArray::kHeaderSize + kPointerSize;
2039 __ LoadP(r5, FieldMemOperand(r8, count_offset));
2040 __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
2041 __ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
2042
2043 __ bind(&call_function);
2044 __ mov(r2, Operand(argc));
2045 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
2046 tail_call_mode()),
2047 RelocInfo::CODE_TARGET);
2048
2049 __ bind(&extra_checks_or_miss);
2050 Label uninitialized, miss, not_allocation_site;
2051
2052 __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex);
2053 __ beq(&call);
2054
2055 // Verify that r6 contains an AllocationSite
2056 __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
2057 __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
2058 __ bne(&not_allocation_site);
2059
2060 // We have an allocation site.
2061 HandleArrayCase(masm, &miss);
2062
2063 __ bind(&not_allocation_site);
2064
2065 // The following cases attempt to handle MISS cases without going to the
2066 // runtime.
2067 if (FLAG_trace_ic) {
2068 __ b(&miss);
2069 }
2070
2071 __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex);
2072 __ beq(&uninitialized);
2073
2074 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2075 // to handle it here. More complex cases are dealt with in the runtime.
2076 __ AssertNotSmi(r6);
2077 __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE);
2078 __ bne(&miss);
2079 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2080 __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
2081
2082 __ bind(&call);
2083 __ mov(r2, Operand(argc));
2084 __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
2085 RelocInfo::CODE_TARGET);
2086
2087 __ bind(&uninitialized);
2088
2089 // We are going monomorphic, provided we actually have a JSFunction.
2090 __ JumpIfSmi(r3, &miss);
2091
2092 // Go to the miss case if we do not have a function.
2093 __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE);
2094 __ bne(&miss);
2095
2096 // Make sure the function is not the Array() function, which requires special
2097 // behavior on MISS.
2098 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6);
2099 __ CmpP(r3, r6);
2100 __ beq(&miss);
2101
2102 // Make sure the function belongs to the same native context.
2103 __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset));
2104 __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX));
2105 __ LoadP(ip, NativeContextMemOperand());
2106 __ CmpP(r6, ip);
2107 __ bne(&miss);
2108
2109 // Initialize the call counter.
2110 __ LoadSmiLiteral(r7, Smi::FromInt(1));
2111 __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
2112
2113 // Store the function. Use a stub since we need a frame for allocation.
2114 // r4 - vector
2115 // r5 - slot
2116 // r3 - function
2117 {
2118 FrameScope scope(masm, StackFrame::INTERNAL);
2119 CreateWeakCellStub create_stub(masm->isolate());
2120 __ Push(r3);
2121 __ CallStub(&create_stub);
2122 __ Pop(r3);
2123 }
2124
2125 __ b(&call_function);
2126
2127 // We are here because tracing is on or we encountered a MISS case we can't
2128 // handle here.
2129 __ bind(&miss);
2130 GenerateMiss(masm);
2131
2132 __ b(&call);
2133}
2134
2135void CallICStub::GenerateMiss(MacroAssembler* masm) {
2136 FrameScope scope(masm, StackFrame::INTERNAL);
2137
2138 // Push the function and feedback info.
2139 __ Push(r3, r4, r5);
2140
2141 // Call the entry.
2142 __ CallRuntime(Runtime::kCallIC_Miss);
2143
2144 // Move result to r3 and exit the internal frame.
2145 __ LoadRR(r3, r2);
2146}
2147
2148// StringCharCodeAtGenerator
2149void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2150 // If the receiver is a smi trigger the non-string case.
2151 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2152 __ JumpIfSmi(object_, receiver_not_string_);
2153
2154 // Fetch the instance type of the receiver into result register.
2155 __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2156 __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2157 // If the receiver is not a string trigger the non-string case.
2158 __ mov(r0, Operand(kIsNotStringMask));
2159 __ AndP(r0, result_);
2160 __ bne(receiver_not_string_);
2161 }
2162
2163 // If the index is non-smi trigger the non-smi case.
2164 __ JumpIfNotSmi(index_, &index_not_smi_);
2165 __ bind(&got_smi_index_);
2166
2167 // Check for index out of range.
2168 __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
2169 __ CmpLogicalP(ip, index_);
2170 __ ble(index_out_of_range_);
2171
2172 __ SmiUntag(index_);
2173
2174 StringCharLoadGenerator::Generate(masm, object_, index_, result_,
2175 &call_runtime_);
2176
2177 __ SmiTag(result_);
2178 __ bind(&exit_);
2179}
2180
2181void StringCharCodeAtGenerator::GenerateSlow(
2182 MacroAssembler* masm, EmbedMode embed_mode,
2183 const RuntimeCallHelper& call_helper) {
2184 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2185
2186 // Index is not a smi.
2187 __ bind(&index_not_smi_);
2188 // If index is a heap number, try converting it to an integer.
2189 __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
2190 DONT_DO_SMI_CHECK);
2191 call_helper.BeforeCall(masm);
2192 if (embed_mode == PART_OF_IC_HANDLER) {
2193 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2194 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2195 } else {
2196 // index_ is consumed by runtime conversion function.
2197 __ Push(object_, index_);
2198 }
2199 __ CallRuntime(Runtime::kNumberToSmi);
2200 // Save the conversion result before the pop instructions below
2201 // have a chance to overwrite it.
2202 __ Move(index_, r2);
2203 if (embed_mode == PART_OF_IC_HANDLER) {
2204 __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2205 LoadWithVectorDescriptor::SlotRegister(), object_);
2206 } else {
2207 __ pop(object_);
2208 }
2209 // Reload the instance type.
2210 __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2211 __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2212 call_helper.AfterCall(masm);
2213 // If index is still not a smi, it must be out of range.
2214 __ JumpIfNotSmi(index_, index_out_of_range_);
2215 // Otherwise, return to the fast path.
2216 __ b(&got_smi_index_);
2217
2218 // Call runtime. We get here when the receiver is a string and the
2219 // index is a number, but the code of getting the actual character
2220 // is too complex (e.g., when the string needs to be flattened).
2221 __ bind(&call_runtime_);
2222 call_helper.BeforeCall(masm);
2223 __ SmiTag(index_);
2224 __ Push(object_, index_);
2225 __ CallRuntime(Runtime::kStringCharCodeAtRT);
2226 __ Move(result_, r2);
2227 call_helper.AfterCall(masm);
2228 __ b(&exit_);
2229
2230 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2231}
2232
2233// -------------------------------------------------------------------------
2234// StringCharFromCodeGenerator
2235
2236void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2237 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2238 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2239 __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
2240 __ OrP(r0, r0, Operand(kSmiTagMask));
2241 __ AndP(r0, code_, r0);
2242 __ bne(&slow_case_);
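  // Illustrative note (not part of the original generator): the combined mask
  // tests in one step that code_ is a Smi and that its value fits in a
  // one-byte char code. For example Smi(65) ('A') leaves the AND result zero
  // and falls through to the cache lookup, while Smi(0x100) or a heap object
  // pointer (tag bit set) branches to the slow case.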
2243
2244 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2245 // At this point code register contains smi tagged one-byte char code.
2246 __ LoadRR(r0, code_);
2247 __ SmiToPtrArrayOffset(code_, code_);
2248 __ AddP(result_, code_);
2249 __ LoadRR(code_, r0);
2250 __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2251 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2252 __ beq(&slow_case_);
2253 __ bind(&exit_);
2254}
2255
2256void StringCharFromCodeGenerator::GenerateSlow(
2257 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
2258 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2259
2260 __ bind(&slow_case_);
2261 call_helper.BeforeCall(masm);
2262 __ push(code_);
2263 __ CallRuntime(Runtime::kStringCharFromCode);
2264 __ Move(result_, r2);
2265 call_helper.AfterCall(masm);
2266 __ b(&exit_);
2267
2268 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2269}
2270
2271enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
2272
2273void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
2274 Register src, Register count,
2275 Register scratch,
2276 String::Encoding encoding) {
2277 if (FLAG_debug_code) {
2278 // Check that destination is word aligned.
2279 __ mov(r0, Operand(kPointerAlignmentMask));
2280 __ AndP(r0, dest);
2281 __ Check(eq, kDestinationOfCopyNotAligned, cr0);
2282 }
2283
2284 // Nothing to do for zero characters.
2285 Label done;
2286 if (encoding == String::TWO_BYTE_ENCODING) {
2287 // double the length
2288 __ AddP(count, count, count);
2289 __ beq(&done, Label::kNear);
2290 } else {
2291 __ CmpP(count, Operand::Zero());
2292 __ beq(&done, Label::kNear);
2293 }
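  // Illustrative note (not from the original source): count is a character
  // count on entry; for TWO_BYTE_ENCODING it has just been doubled, so the
  // byte loop below always copies count bytes. Copying 4 two-byte characters,
  // for instance, runs 8 single-byte iterations.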
2294
2295 // Copy count bytes from src to dst.
2296 Label byte_loop;
2297 // TODO(joransiu): Convert into MVC loop
2298 __ bind(&byte_loop);
2299 __ LoadlB(scratch, MemOperand(src));
2300 __ la(src, MemOperand(src, 1));
2301 __ stc(scratch, MemOperand(dest));
2302 __ la(dest, MemOperand(dest, 1));
2303 __ BranchOnCount(count, &byte_loop);
2304
2305 __ bind(&done);
2306}
2307
2308void SubStringStub::Generate(MacroAssembler* masm) {
2309 Label runtime;
2310
2311 // Stack frame on entry.
2312 // lr: return address
2313 // sp[0]: to
2314 // sp[4]: from
2315 // sp[8]: string
2316
2317 // This stub is called from the native-call %_SubString(...), so
2318 // nothing can be assumed about the arguments. It is tested that:
2319 // "string" is a sequential string,
2320 // both "from" and "to" are smis, and
2321 // 0 <= from <= to <= string.length.
2322 // If any of these assumptions fail, we call the runtime system.
2323
2324 const int kToOffset = 0 * kPointerSize;
2325 const int kFromOffset = 1 * kPointerSize;
2326 const int kStringOffset = 2 * kPointerSize;
2327
2328 __ LoadP(r4, MemOperand(sp, kToOffset));
2329 __ LoadP(r5, MemOperand(sp, kFromOffset));
2330
2331 // If either to or from has the smi tag bit set, bail out to the generic runtime.
2332 __ JumpIfNotSmi(r4, &runtime);
2333 __ JumpIfNotSmi(r5, &runtime);
2334 __ SmiUntag(r4);
2335 __ SmiUntag(r5);
2336 // Both r4 and r5 are untagged integers.
2337
2338 // We want to bail out to the runtime here if from is negative.
2339 __ blt(&runtime); // From < 0.
2340
2341 __ CmpLogicalP(r5, r4);
2342 __ bgt(&runtime); // Fail if from > to.
2343 __ SubP(r4, r4, r5);
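  // Illustrative note (not from the original source): r4 now holds the result
  // length. For %_SubString(s, 2, 5) the untagged values are from = 2 and
  // to = 5, so r4 = 5 - 2 = 3 characters.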
2344
2345 // Make sure first argument is a string.
2346 __ LoadP(r2, MemOperand(sp, kStringOffset));
2347 __ JumpIfSmi(r2, &runtime);
2348 Condition is_string = masm->IsObjectStringType(r2, r3);
2349 __ b(NegateCondition(is_string), &runtime);
2350
2351 Label single_char;
2352 __ CmpP(r4, Operand(1));
2353 __ b(eq, &single_char);
2354
2355 // Short-cut for the case of trivial substring.
2356 Label return_r2;
2357 // r2: original string
2358 // r4: result string length
2359 __ LoadP(r6, FieldMemOperand(r2, String::kLengthOffset));
2360 __ SmiUntag(r0, r6);
2361 __ CmpLogicalP(r4, r0);
2362 // Return original string.
2363 __ beq(&return_r2);
2364 // Longer than original string's length or negative: unsafe arguments.
2365 __ bgt(&runtime);
2366 // Shorter than original string's length: an actual substring.
2367
2368 // Deal with different string types: update the index if necessary
2369 // and put the underlying string into r7.
2370 // r2: original string
2371 // r3: instance type
2372 // r4: length
2373 // r5: from index (untagged)
2374 Label underlying_unpacked, sliced_string, seq_or_external_string;
2375 // If the string is not indirect, it can only be sequential or external.
2376 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
2377 STATIC_ASSERT(kIsIndirectStringMask != 0);
2378 __ mov(r0, Operand(kIsIndirectStringMask));
2379 __ AndP(r0, r3);
2380 __ beq(&seq_or_external_string);
2381
2382 __ mov(r0, Operand(kSlicedNotConsMask));
2383 __ AndP(r0, r3);
2384 __ bne(&sliced_string);
2385 // Cons string. Check whether it is flat, then fetch first part.
2386 __ LoadP(r7, FieldMemOperand(r2, ConsString::kSecondOffset));
2387 __ CompareRoot(r7, Heap::kempty_stringRootIndex);
2388 __ bne(&runtime);
2389 __ LoadP(r7, FieldMemOperand(r2, ConsString::kFirstOffset));
2390 // Update instance type.
2391 __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
2392 __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
2393 __ b(&underlying_unpacked);
2394
2395 __ bind(&sliced_string);
2396 // Sliced string. Fetch parent and correct start index by offset.
2397 __ LoadP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
2398 __ LoadP(r6, FieldMemOperand(r2, SlicedString::kOffsetOffset));
2399 __ SmiUntag(r3, r6);
2400 __ AddP(r5, r3); // Add offset to index.
2401 // Update instance type.
2402 __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
2403 __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
2404 __ b(&underlying_unpacked);
2405
2406 __ bind(&seq_or_external_string);
2407 // Sequential or external string. Just move string to the expected register.
2408 __ LoadRR(r7, r2);
2409
2410 __ bind(&underlying_unpacked);
2411
2412 if (FLAG_string_slices) {
2413 Label copy_routine;
2414 // r7: underlying subject string
2415 // r3: instance type of underlying subject string
2416 // r4: length
2417 // r5: adjusted start index (untagged)
2418 __ CmpP(r4, Operand(SlicedString::kMinLength));
2419 // Short slice. Copy instead of slicing.
2420 __ blt(&copy_routine);
2421 // Allocate new sliced string. At this point we do not reload the instance
2422 // type including the string encoding because we simply rely on the info
2423 // provided by the original string. It does not matter if the original
2424 // string's encoding is wrong because we always have to recheck encoding of
2425 // the newly created string's parent anyway due to externalized strings.
2426 Label two_byte_slice, set_slice_header;
2427 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
2428 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
2429 __ mov(r0, Operand(kStringEncodingMask));
2430 __ AndP(r0, r3);
2431 __ beq(&two_byte_slice);
2432 __ AllocateOneByteSlicedString(r2, r4, r8, r9, &runtime);
2433 __ b(&set_slice_header);
2434 __ bind(&two_byte_slice);
2435 __ AllocateTwoByteSlicedString(r2, r4, r8, r9, &runtime);
2436 __ bind(&set_slice_header);
2437 __ SmiTag(r5);
2438 __ StoreP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
2439 __ StoreP(r5, FieldMemOperand(r2, SlicedString::kOffsetOffset));
2440 __ b(&return_r2);
2441
2442 __ bind(&copy_routine);
2443 }
2444
2445 // r7: underlying subject string
2446 // r3: instance type of underlying subject string
2447 // r4: length
2448 // r5: adjusted start index (untagged)
2449 Label two_byte_sequential, sequential_string, allocate_result;
2450 STATIC_ASSERT(kExternalStringTag != 0);
2451 STATIC_ASSERT(kSeqStringTag == 0);
2452 __ mov(r0, Operand(kExternalStringTag));
2453 __ AndP(r0, r3);
2454 __ beq(&sequential_string);
2455
2456 // Handle external string.
2457 // Rule out short external strings.
2458 STATIC_ASSERT(kShortExternalStringTag != 0);
2459 __ mov(r0, Operand(kShortExternalStringTag));
2460 __ AndP(r0, r3);
2461 __ bne(&runtime);
2462 __ LoadP(r7, FieldMemOperand(r7, ExternalString::kResourceDataOffset));
2463 // r7 already points to the first character of underlying string.
2464 __ b(&allocate_result);
2465
2466 __ bind(&sequential_string);
2467 // Locate first character of underlying subject string.
2468 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2469 __ AddP(r7, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2470
2471 __ bind(&allocate_result);
2472 // Sequential or external string. Allocate the result.
2473 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
2474 __ mov(r0, Operand(kStringEncodingMask));
2475 __ AndP(r0, r3);
2476 __ beq(&two_byte_sequential);
2477
2478 // Allocate and copy the resulting one-byte string.
2479 __ AllocateOneByteString(r2, r4, r6, r8, r9, &runtime);
2480
2481 // Locate first character of substring to copy.
2482 __ AddP(r7, r5);
2483 // Locate first character of result.
2484 __ AddP(r3, r2, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2485
2486 // r2: result string
2487 // r3: first character of result string
2488 // r4: result string length
2489 // r7: first character of substring to copy
2490 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2491 StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
2492 String::ONE_BYTE_ENCODING);
2493 __ b(&return_r2);
2494
2495 // Allocate and copy the resulting two-byte string.
2496 __ bind(&two_byte_sequential);
2497 __ AllocateTwoByteString(r2, r4, r6, r8, r9, &runtime);
2498
2499 // Locate first character of substring to copy.
2500 __ ShiftLeftP(r3, r5, Operand(1));
2501 __ AddP(r7, r3);
2502 // Locate first character of result.
2503 __ AddP(r3, r2, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2504
2505 // r2: result string.
2506 // r3: first character of result.
2507 // r4: result length.
2508 // r7: first character of substring to copy.
2509 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2510 StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
2511 String::TWO_BYTE_ENCODING);
2512
2513 __ bind(&return_r2);
2514 Counters* counters = isolate()->counters();
2515 __ IncrementCounter(counters->sub_string_native(), 1, r5, r6);
2516 __ Drop(3);
2517 __ Ret();
2518
2519 // Just jump to runtime to create the sub string.
2520 __ bind(&runtime);
2521 __ TailCallRuntime(Runtime::kSubString);
2522
2523 __ bind(&single_char);
2524 // r2: original string
2525 // r3: instance type
2526 // r4: length
2527 // r5: from index (untagged)
2528 __ SmiTag(r5, r5);
2529 StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
2530 RECEIVER_IS_STRING);
2531 generator.GenerateFast(masm);
2532 __ Drop(3);
2533 __ Ret();
2534 generator.SkipSlow(masm, &runtime);
2535}
2536
2537 void ToStringStub::Generate(MacroAssembler* masm) {
2538 // The ToString stub takes one argument in r2.
2539 Label done;
2540 Label is_number;
2541 __ JumpIfSmi(r2, &is_number);
2542
2543 __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
2544 // r2: receiver
2545 // r3: receiver instance type
2546 __ blt(&done);
2547
2548 Label not_heap_number;
2549 __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
2550 __ bne(&not_heap_number);
2551 __ bind(&is_number);
2552 NumberToStringStub stub(isolate());
2553 __ TailCallStub(&stub);
2554 __ bind(&not_heap_number);
2555
2556 Label not_oddball;
2557 __ CmpP(r3, Operand(ODDBALL_TYPE));
2558 __ bne(&not_oddball);
2559 __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
2560 __ Ret();
2561 __ bind(&not_oddball);
2562
2563 __ push(r2); // Push argument.
2564 __ TailCallRuntime(Runtime::kToString);
2565
2566 __ bind(&done);
2567 __ Ret();
2568}
2569
2570void ToNameStub::Generate(MacroAssembler* masm) {
2571 // The ToName stub takes one argument in r2.
2572 Label is_number;
2573 __ JumpIfSmi(r2, &is_number);
2574
2575 STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
2576 __ CompareObjectType(r2, r3, r3, LAST_NAME_TYPE);
2577 // r2: receiver
2578 // r3: receiver instance type
2579 __ Ret(le);
2580
2581 Label not_heap_number;
2582 __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
2583 __ bne(&not_heap_number);
2584 __ bind(&is_number);
2585 NumberToStringStub stub(isolate());
2586 __ TailCallStub(&stub);
2587 __ bind(&not_heap_number);
2588
2589 Label not_oddball;
2590 __ CmpP(r3, Operand(ODDBALL_TYPE));
2591 __ bne(&not_oddball);
2592 __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
2593 __ Ret();
2594 __ bind(&not_oddball);
2595
2596 __ push(r2); // Push argument.
2597 __ TailCallRuntime(Runtime::kToName);
2598}
2599
2600void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
2601 Register left,
2602 Register right,
2603 Register scratch1,
2604 Register scratch2) {
2605 Register length = scratch1;
2606
2607 // Compare lengths.
2608 Label strings_not_equal, check_zero_length;
2609 __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
2610 __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
2611 __ CmpP(length, scratch2);
2612 __ beq(&check_zero_length);
2613 __ bind(&strings_not_equal);
2614 __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL));
2615 __ Ret();
2616
2617 // Check if the length is zero.
2618 Label compare_chars;
2619 __ bind(&check_zero_length);
2620 STATIC_ASSERT(kSmiTag == 0);
2621 __ CmpP(length, Operand::Zero());
2622 __ bne(&compare_chars);
2623 __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2624 __ Ret();
2625
2626 // Compare characters.
2627 __ bind(&compare_chars);
2628 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
2629 &strings_not_equal);
2630
2631 // Characters are equal.
2632 __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2633 __ Ret();
2634}
2635
2636void StringHelper::GenerateCompareFlatOneByteStrings(
2637 MacroAssembler* masm, Register left, Register right, Register scratch1,
2638 Register scratch2, Register scratch3) {
2639 Label skip, result_not_equal, compare_lengths;
2640 // Find minimum length and length difference.
2641 __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
2642 __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
2643 __ SubP(scratch3, scratch1, scratch2 /*, LeaveOE, SetRC*/);
2644 // Removing RC looks okay here.
2645 Register length_delta = scratch3;
2646 __ ble(&skip, Label::kNear);
2647 __ LoadRR(scratch1, scratch2);
2648 __ bind(&skip);
2649 Register min_length = scratch1;
2650 STATIC_ASSERT(kSmiTag == 0);
2651 __ CmpP(min_length, Operand::Zero());
2652 __ beq(&compare_lengths);
2653
2654 // Compare loop.
2655 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2656 &result_not_equal);
2657
2658 // Compare lengths - strings up to min-length are equal.
2659 __ bind(&compare_lengths);
2660 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2661 // Use length_delta as result if it's zero.
2662 __ LoadRR(r2, length_delta);
2663 __ CmpP(length_delta, Operand::Zero());
2664 __ bind(&result_not_equal);
2665 // Conditionally update the result based either on length_delta or
2666 // the last comparison performed in the loop above.
2667 Label less_equal, equal;
2668 __ ble(&less_equal);
2669 __ LoadSmiLiteral(r2, Smi::FromInt(GREATER));
2670 __ Ret();
2671 __ bind(&less_equal);
2672 __ beq(&equal);
2673 __ LoadSmiLiteral(r2, Smi::FromInt(LESS));
2674 __ bind(&equal);
2675 __ Ret();
2676}
2677
2678void StringHelper::GenerateOneByteCharsCompareLoop(
2679 MacroAssembler* masm, Register left, Register right, Register length,
2680 Register scratch1, Label* chars_not_equal) {
2681 // Change index to run from -length to -1 by adding length to string
2682 // start. This means that loop ends when index reaches zero, which
2683 // doesn't need an additional compare.
2684 __ SmiUntag(length);
2685 __ AddP(scratch1, length,
2686 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2687 __ AddP(left, scratch1);
2688 __ AddP(right, scratch1);
2689 __ LoadComplementRR(length, length);
2690 Register index = length; // index = -length;
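  // Illustrative note (not from the original source): left and right now point
  // just past their last character, and index counts up from -length. With
  // length 3 the loop compares the bytes at offsets -3, -2 and -1 and exits
  // once index reaches zero, so no separate end-of-string compare is needed.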
2691
2692 // Compare loop.
2693 Label loop;
2694 __ bind(&loop);
2695 __ LoadlB(scratch1, MemOperand(left, index));
2696 __ LoadlB(r0, MemOperand(right, index));
2697 __ CmpP(scratch1, r0);
2698 __ bne(chars_not_equal);
2699 __ AddP(index, Operand(1));
2700 __ CmpP(index, Operand::Zero());
2701 __ bne(&loop);
2702}
2703
2704void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2705 // ----------- S t a t e -------------
2706 // -- r3 : left
2707 // -- r2 : right
2708 // r3: second string
2709 // -----------------------------------
2710
2711 // Load r4 with the allocation site. We stick an undefined dummy value here
2712 // and replace it with the real allocation site later when we instantiate this
2713 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2714 __ Move(r4, isolate()->factory()->undefined_value());
2715
2716 // Make sure that we actually patched the allocation site.
2717 if (FLAG_debug_code) {
2718 __ TestIfSmi(r4);
2719 __ Assert(ne, kExpectedAllocationSite, cr0);
2720 __ push(r4);
2721 __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset));
2722 __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
2723 __ pop(r4);
2724 __ Assert(eq, kExpectedAllocationSite);
2725 }
2726
2727 // Tail call into the stub that handles binary operations with allocation
2728 // sites.
2729 BinaryOpWithAllocationSiteStub stub(isolate(), state());
2730 __ TailCallStub(&stub);
2731}
2732
2733void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2734 DCHECK_EQ(CompareICState::BOOLEAN, state());
2735 Label miss;
2736
2737 __ CheckMap(r3, r4, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2738 __ CheckMap(r2, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2739 if (!Token::IsEqualityOp(op())) {
2740 __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
2741 __ AssertSmi(r3);
2742 __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
2743 __ AssertSmi(r2);
2744 }
2745 __ SubP(r2, r3, r2);
2746 __ Ret();
2747
2748 __ bind(&miss);
2749 GenerateMiss(masm);
2750}
2751
2752void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2753 DCHECK(state() == CompareICState::SMI);
2754 Label miss;
2755 __ OrP(r4, r3, r2);
2756 __ JumpIfNotSmi(r4, &miss);
2757
2758 if (GetCondition() == eq) {
2759 // For equality we do not care about the sign of the result.
2760 // __ sub(r2, r2, r3, SetCC);
2761 __ SubP(r2, r2, r3);
2762 } else {
2763 // Untag before subtracting to avoid handling overflow.
2764 __ SmiUntag(r3);
2765 __ SmiUntag(r2);
2766 __ SubP(r2, r3, r2);
2767 }
2768 __ Ret();
2769
2770 __ bind(&miss);
2771 GenerateMiss(masm);
2772}
2773
2774void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2775 DCHECK(state() == CompareICState::NUMBER);
2776
2777 Label generic_stub;
2778 Label unordered, maybe_undefined1, maybe_undefined2;
2779 Label miss;
2780 Label equal, less_than;
2781
2782 if (left() == CompareICState::SMI) {
2783 __ JumpIfNotSmi(r3, &miss);
2784 }
2785 if (right() == CompareICState::SMI) {
2786 __ JumpIfNotSmi(r2, &miss);
2787 }
2788
2789 // Inlining the double comparison and falling back to the general compare
2790 // stub if NaN is involved.
2791 // Load left and right operand.
2792 Label done, left, left_smi, right_smi;
2793 __ JumpIfSmi(r2, &right_smi);
2794 __ CheckMap(r2, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2795 DONT_DO_SMI_CHECK);
2796 __ LoadDouble(d1, FieldMemOperand(r2, HeapNumber::kValueOffset));
2797 __ b(&left);
2798 __ bind(&right_smi);
2799 __ SmiToDouble(d1, r2);
2800
2801 __ bind(&left);
2802 __ JumpIfSmi(r3, &left_smi);
2803 __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2804 DONT_DO_SMI_CHECK);
2805 __ LoadDouble(d0, FieldMemOperand(r3, HeapNumber::kValueOffset));
2806 __ b(&done);
2807 __ bind(&left_smi);
2808 __ SmiToDouble(d0, r3);
2809
2810 __ bind(&done);
2811
2812 // Compare operands
2813 __ cdbr(d0, d1);
2814
2815 // Don't base result on status bits when a NaN is involved.
2816 __ bunordered(&unordered);
2817
2818 // Return a result of -1, 0, or 1, based on status bits.
2819 __ beq(&equal);
2820 __ blt(&less_than);
2821 // assume greater than
2822 __ LoadImmP(r2, Operand(GREATER));
2823 __ Ret();
2824 __ bind(&equal);
2825 __ LoadImmP(r2, Operand(EQUAL));
2826 __ Ret();
2827 __ bind(&less_than);
2828 __ LoadImmP(r2, Operand(LESS));
2829 __ Ret();
2830
2831 __ bind(&unordered);
2832 __ bind(&generic_stub);
2833 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2834 CompareICState::GENERIC, CompareICState::GENERIC);
2835 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2836
2837 __ bind(&maybe_undefined1);
2838 if (Token::IsOrderedRelationalCompareOp(op())) {
2839 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
2840 __ bne(&miss);
2841 __ JumpIfSmi(r3, &unordered);
2842 __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
2843 __ bne(&maybe_undefined2);
2844 __ b(&unordered);
2845 }
2846
2847 __ bind(&maybe_undefined2);
2848 if (Token::IsOrderedRelationalCompareOp(op())) {
2849 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
2850 __ beq(&unordered);
2851 }
2852
2853 __ bind(&miss);
2854 GenerateMiss(masm);
2855}
2856
2857void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2858 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2859 Label miss, not_equal;
2860
2861 // Registers containing left and right operands respectively.
2862 Register left = r3;
2863 Register right = r2;
2864 Register tmp1 = r4;
2865 Register tmp2 = r5;
2866
2867 // Check that both operands are heap objects.
2868 __ JumpIfEitherSmi(left, right, &miss);
2869
2870 // Check that both operands are symbols.
2871 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2872 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2873 __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2874 __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2875 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2876 __ OrP(tmp1, tmp1, tmp2);
2877 __ AndP(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2878 __ bne(&miss);
2879
2880 // Internalized strings are compared by identity.
2881 __ CmpP(left, right);
2882 __ bne(&not_equal);
2883 // Make sure r2 is non-zero. At this point input operands are
2884 // guaranteed to be non-zero.
2885 DCHECK(right.is(r2));
2886 STATIC_ASSERT(EQUAL == 0);
2887 STATIC_ASSERT(kSmiTag == 0);
2888 __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2889 __ bind(&not_equal);
2890 __ Ret();
2891
2892 __ bind(&miss);
2893 GenerateMiss(masm);
2894}
2895
2896void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2897 DCHECK(state() == CompareICState::UNIQUE_NAME);
2898 DCHECK(GetCondition() == eq);
2899 Label miss;
2900
2901 // Registers containing left and right operands respectively.
2902 Register left = r3;
2903 Register right = r2;
2904 Register tmp1 = r4;
2905 Register tmp2 = r5;
2906
2907 // Check that both operands are heap objects.
2908 __ JumpIfEitherSmi(left, right, &miss);
2909
2910 // Check that both operands are unique names. This leaves the instance
2911 // types loaded in tmp1 and tmp2.
2912 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2913 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2914 __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2915 __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2916
2917 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2918 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2919
2920 // Unique names are compared by identity.
2921 __ CmpP(left, right);
2922 __ bne(&miss);
2923 // Make sure r2 is non-zero. At this point input operands are
2924 // guaranteed to be non-zero.
2925 DCHECK(right.is(r2));
2926 STATIC_ASSERT(EQUAL == 0);
2927 STATIC_ASSERT(kSmiTag == 0);
2928 __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2929 __ Ret();
2930
2931 __ bind(&miss);
2932 GenerateMiss(masm);
2933}
2934
2935void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2936 DCHECK(state() == CompareICState::STRING);
2937 Label miss, not_identical, is_symbol;
2938
2939 bool equality = Token::IsEqualityOp(op());
2940
2941 // Registers containing left and right operands respectively.
2942 Register left = r3;
2943 Register right = r2;
2944 Register tmp1 = r4;
2945 Register tmp2 = r5;
2946 Register tmp3 = r6;
2947 Register tmp4 = r7;
2948
2949 // Check that both operands are heap objects.
2950 __ JumpIfEitherSmi(left, right, &miss);
2951
2952 // Check that both operands are strings. This leaves the instance
2953 // types loaded in tmp1 and tmp2.
2954 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2955 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2956 __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2957 __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2958 STATIC_ASSERT(kNotStringTag != 0);
2959 __ OrP(tmp3, tmp1, tmp2);
2960 __ AndP(r0, tmp3, Operand(kIsNotStringMask));
2961 __ bne(&miss);
2962
2963 // Fast check for identical strings.
2964 __ CmpP(left, right);
2965 STATIC_ASSERT(EQUAL == 0);
2966 STATIC_ASSERT(kSmiTag == 0);
2967 __ bne(&not_identical);
2968 __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
2969 __ Ret();
2970 __ bind(&not_identical);
2971
2972 // Handle not identical strings.
2973
2974 // Check that both strings are internalized strings. If they are, we're done
2975 // because we already know they are not identical. We know they are both
2976 // strings.
2977 if (equality) {
2978 DCHECK(GetCondition() == eq);
2979 STATIC_ASSERT(kInternalizedTag == 0);
2980 __ OrP(tmp3, tmp1, tmp2);
2981 __ AndP(r0, tmp3, Operand(kIsNotInternalizedMask));
2982 __ bne(&is_symbol);
2983 // Make sure r2 is non-zero. At this point input operands are
2984 // guaranteed to be non-zero.
2985 DCHECK(right.is(r2));
2986 __ Ret();
2987 __ bind(&is_symbol);
2988 }
2989
2990 // Check that both strings are sequential one-byte.
2991 Label runtime;
2992 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2993 &runtime);
2994
2995 // Compare flat one-byte strings. Returns when done.
2996 if (equality) {
2997 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
2998 tmp2);
2999 } else {
3000 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3001 tmp2, tmp3);
3002 }
3003
3004 // Handle more complex cases in runtime.
3005 __ bind(&runtime);
3006 if (equality) {
3007 {
3008 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3009 __ Push(left, right);
3010 __ CallRuntime(Runtime::kStringEqual);
3011 }
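  // kStringEqual returns the true/false root; subtracting the true root below
  // yields 0 (EQUAL) when the strings are equal and a non-zero value otherwise.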
3012 __ LoadRoot(r3, Heap::kTrueValueRootIndex);
3013 __ SubP(r2, r2, r3);
3014 __ Ret();
3015 } else {
3016 __ Push(left, right);
3017 __ TailCallRuntime(Runtime::kStringCompare);
3018 }
3019
3020 __ bind(&miss);
3021 GenerateMiss(masm);
3022}
3023
3024void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
3025 DCHECK_EQ(CompareICState::RECEIVER, state());
3026 Label miss;
3027 __ AndP(r4, r3, r2);
3028 __ JumpIfSmi(r4, &miss);
3029
3030 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3031 __ CompareObjectType(r2, r4, r4, FIRST_JS_RECEIVER_TYPE);
3032 __ blt(&miss);
3033 __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
3034 __ blt(&miss);
3035
3036 DCHECK(GetCondition() == eq);
3037 __ SubP(r2, r2, r3);
3038 __ Ret();
3039
3040 __ bind(&miss);
3041 GenerateMiss(masm);
3042}
3043
3044void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
3045 Label miss;
3046 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3047 __ AndP(r4, r3, r2);
3048 __ JumpIfSmi(r4, &miss);
3049 __ GetWeakValue(r6, cell);
3050 __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
3051 __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
3052 __ CmpP(r4, r6);
3053 __ bne(&miss);
3054 __ CmpP(r5, r6);
3055 __ bne(&miss);
3056
3057 if (Token::IsEqualityOp(op())) {
3058 __ SubP(r2, r2, r3);
3059 __ Ret();
3060 } else {
3061 if (op() == Token::LT || op() == Token::LTE) {
3062 __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
3063 } else {
3064 __ LoadSmiLiteral(r4, Smi::FromInt(LESS));
3065 }
3066 __ Push(r3, r2, r4);
3067 __ TailCallRuntime(Runtime::kCompare);
3068 }
3069
3070 __ bind(&miss);
3071 GenerateMiss(masm);
3072}
3073
3074void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3075 {
3076 // Call the runtime system in a fresh internal frame.
3077 FrameScope scope(masm, StackFrame::INTERNAL);
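    // The operands are pushed twice: the first pair is preserved across the
    // call and restored below, while the second pair (plus the op smi) forms
    // the arguments to the CompareIC_Miss runtime function.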
3078 __ Push(r3, r2);
3079 __ Push(r3, r2);
3080 __ LoadSmiLiteral(r0, Smi::FromInt(op()));
3081 __ push(r0);
3082 __ CallRuntime(Runtime::kCompareIC_Miss);
3083 // Compute the entry point of the rewritten stub.
3084 __ AddP(r4, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
3085 // Restore registers.
3086 __ Pop(r3, r2);
3087 }
3088
3089 __ JumpToJSEntry(r4);
3090}
3091
3092// This stub is paired with DirectCEntryStub::GenerateCall
3093void DirectCEntryStub::Generate(MacroAssembler* masm) {
3094 __ CleanseP(r14);
3095
3096 __ b(ip); // Callee will return to R14 directly
3097}
3098
3099void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
3100#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
3101 // Native AIX/S390X Linux use a function descriptor.
3102 __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
3103 __ LoadP(target, MemOperand(target, 0)); // Instruction address
3104#else
3105 // ip needs to be set for DirectCEntryStub::Generate, and also
3106 // for ABI_CALL_VIA_IP.
3107 __ Move(ip, target);
3108#endif
3109
3110 __ call(GetCode(), RelocInfo::CODE_TARGET); // Call the stub.
3111}
3112
3113void NameDictionaryLookupStub::GenerateNegativeLookup(
3114 MacroAssembler* masm, Label* miss, Label* done, Register receiver,
3115 Register properties, Handle<Name> name, Register scratch0) {
3116 DCHECK(name->IsUniqueName());
3117 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3118 // not equal to the name and kProbes-th slot is not used (its name is the
3119 // undefined value), it guarantees the hash table doesn't contain the
3120 // property. It's true even if some slots represent deleted properties
3121 // (their names are the hole value).
3122 for (int i = 0; i < kInlinedProbes; i++) {
3123 // scratch0 points to properties hash.
3124 // Compute the masked index: (hash + i + i * i) & mask.
3125 Register index = scratch0;
3126 // Capacity is smi 2^n.
3127 __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
3128 __ SubP(index, Operand(1));
3129 __ LoadSmiLiteral(
3130 ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
3131 __ AndP(index, ip);
3132
3133 // Scale the index by multiplying by the entry size.
3134 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3135 __ ShiftLeftP(ip, index, Operand(1));
3136 __ AddP(index, ip); // index *= 3.
3137
3138 Register entity_name = scratch0;
3139 // Having undefined at this place means the name is not contained.
3140 Register tmp = properties;
3141 __ SmiToPtrArrayOffset(ip, index);
3142 __ AddP(tmp, properties, ip);
3143 __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3144
3145 DCHECK(!tmp.is(entity_name));
3146 __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
3147 __ beq(done);
3148
3149 // Stop if found the property.
3150 __ CmpP(entity_name, Operand(Handle<Name>(name)));
3151 __ beq(miss);
3152
3153 Label good;
3154 __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
3155 __ beq(&good);
3156
3157 // Check if the entry name is not a unique name.
3158 __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3159 __ LoadlB(entity_name,
3160 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3161 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3162 __ bind(&good);
3163
3164 // Restore the properties.
3165 __ LoadP(properties,
3166 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3167 }
3168
3169 const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
3170 r4.bit() | r3.bit() | r2.bit());
3171
3172 __ LoadRR(r0, r14);
3173 __ MultiPush(spill_mask);
3174
3175 __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3176 __ mov(r3, Operand(Handle<Name>(name)));
3177 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3178 __ CallStub(&stub);
3179 __ CmpP(r2, Operand::Zero());
3180
3181 __ MultiPop(spill_mask); // MultiPop does not touch condition flags
3182 __ LoadRR(r14, r0);
3183
3184 __ beq(done);
3185 __ bne(miss);
3186}
3187
3188// Probe the name dictionary in the |elements| register. Jump to the
3189// |done| label if a property with the given name is found. Jump to
3190// the |miss| label otherwise.
3191// If lookup was successful |scratch2| will be equal to elements + 4 * index.
3192void NameDictionaryLookupStub::GeneratePositiveLookup(
3193 MacroAssembler* masm, Label* miss, Label* done, Register elements,
3194 Register name, Register scratch1, Register scratch2) {
3195 DCHECK(!elements.is(scratch1));
3196 DCHECK(!elements.is(scratch2));
3197 DCHECK(!name.is(scratch1));
3198 DCHECK(!name.is(scratch2));
3199
3200 __ AssertName(name);
3201
3202 // Compute the capacity mask.
3203 __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
3204 __ SmiUntag(scratch1); // convert smi to int
3205 __ SubP(scratch1, Operand(1));
3206
3207 // Generate an unrolled loop that performs a few probes before
3208 // giving up. Measurements done on Gmail indicate that 2 probes
3209 // cover ~93% of loads from dictionaries.
3210 for (int i = 0; i < kInlinedProbes; i++) {
3211 // Compute the masked index: (hash + i + i * i) & mask.
3212 __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
3213 if (i > 0) {
3214 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3215 // the hash in a separate instruction. The value hash + i + i * i is right
3216 // shifted in the following AND instruction.
3217 DCHECK(NameDictionary::GetProbeOffset(i) <
3218 1 << (32 - Name::kHashFieldOffset));
3219 __ AddP(scratch2,
3220 Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3221 }
3222 __ srl(scratch2, Operand(String::kHashShift));
3223 __ AndP(scratch2, scratch1);
3224
3225 // Scale the index by multiplying by the entry size.
3226 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3227 // scratch2 = scratch2 * 3.
3228 __ ShiftLeftP(ip, scratch2, Operand(1));
3229 __ AddP(scratch2, ip);
3230
3231 // Check if the key is identical to the name.
3232 __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
3233 __ AddP(scratch2, elements, ip);
3234 __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
3235 __ CmpP(name, ip);
3236 __ beq(done);
3237 }
3238
3239 const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
3240 r4.bit() | r3.bit() | r2.bit()) &
3241 ~(scratch1.bit() | scratch2.bit());
3242
3243 __ LoadRR(r0, r14);
3244 __ MultiPush(spill_mask);
3245 if (name.is(r2)) {
3246 DCHECK(!elements.is(r3));
3247 __ LoadRR(r3, name);
3248 __ LoadRR(r2, elements);
3249 } else {
3250 __ LoadRR(r2, elements);
3251 __ LoadRR(r3, name);
3252 }
3253 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3254 __ CallStub(&stub);
3255 __ LoadRR(r1, r2);
3256 __ LoadRR(scratch2, r4);
3257 __ MultiPop(spill_mask);
3258 __ LoadRR(r14, r0);
3259
3260 __ CmpP(r1, Operand::Zero());
3261 __ bne(done);
3262 __ beq(miss);
3263}
3264
3265void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3266 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3267 // we cannot call anything that could cause a GC from this stub.
3268 // Registers:
3269 // result: NameDictionary to probe
3270 // r3: key
3271 // dictionary: NameDictionary to probe.
3272 // index: will hold an index of entry if lookup is successful.
3273 // might alias with result_.
3274 // Returns:
3275 // result_ is zero if lookup failed, non-zero otherwise.
3276
3277 Register result = r2;
3278 Register dictionary = r2;
3279 Register key = r3;
3280 Register index = r4;
3281 Register mask = r5;
3282 Register hash = r6;
3283 Register undefined = r7;
3284 Register entry_key = r8;
3285 Register scratch = r8;
3286
3287 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3288
3289 __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
3290 __ SmiUntag(mask);
3291 __ SubP(mask, Operand(1));
3292
3293 __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset));
3294
3295 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3296
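  // Each probe below roughly computes (an illustrative sketch, not literal
  // code):
  //   index = ((hash >> Name::kHashShift) + GetProbeOffset(i)) & mask;
  // and then loads the key of entry |index| (entries are kEntrySize pointers
  // wide).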
3297 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3298 // Compute the masked index: (hash + i + i * i) & mask.
3299 // Capacity is smi 2^n.
3300 if (i > 0) {
3301 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3302 // the hash in a separate instruction. The value hash + i + i * i is right
3303 // shifted in the following AND instruction.
3304 DCHECK(NameDictionary::GetProbeOffset(i) <
3305 1 << (32 - Name::kHashFieldOffset));
3306 __ AddP(index, hash,
3307 Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3308 } else {
3309 __ LoadRR(index, hash);
3310 }
3311 __ ShiftRight(r0, index, Operand(String::kHashShift));
3312 __ AndP(index, r0, mask);
3313
3314 // Scale the index by multiplying by the entry size.
3315 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3316 __ ShiftLeftP(scratch, index, Operand(1));
3317 __ AddP(index, scratch); // index *= 3.
3318
3319 __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2));
3320 __ AddP(index, dictionary, scratch);
3321 __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
3322
3323 // Having undefined at this place means the name is not contained.
3324 __ CmpP(entry_key, undefined);
3325 __ beq(&not_in_dictionary);
3326
3327 // Stop if found the property.
3328 __ CmpP(entry_key, key);
3329 __ beq(&in_dictionary);
3330
3331 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3332 // Check if the entry name is not a unique name.
3333 __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3334 __ LoadlB(entry_key,
3335 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3336 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3337 }
3338 }
3339
3340 __ bind(&maybe_in_dictionary);
3341 // If we are doing negative lookup then probing failure should be
3342 // treated as a lookup success. For positive lookup probing failure
3343 // should be treated as lookup failure.
3344 if (mode() == POSITIVE_LOOKUP) {
3345 __ LoadImmP(result, Operand::Zero());
3346 __ Ret();
3347 }
3348
3349 __ bind(&in_dictionary);
3350 __ LoadImmP(result, Operand(1));
3351 __ Ret();
3352
3353 __ bind(&not_in_dictionary);
3354 __ LoadImmP(result, Operand::Zero());
3355 __ Ret();
3356}
3357
3358void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3359 Isolate* isolate) {
3360 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3361 stub1.GetCode();
3362 // Hydrogen code stubs need stub2 at snapshot time.
3363 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3364 stub2.GetCode();
3365}
3366
3367// Takes the input in 3 registers: address_, value_ and object_. A pointer to
3368// the value has just been written into the object; now this stub makes sure
3369// we keep the GC informed. The word in the object where the value has been
3370// written is in the address register.
3371void RecordWriteStub::Generate(MacroAssembler* masm) {
3372 Label skip_to_incremental_noncompacting;
3373 Label skip_to_incremental_compacting;
3374
3375 // The first two branch instructions are generated with labels so as to
3376 // get the offset fixed up correctly by the bind(Label*) call. We patch
3377 // it back and forth between branch condition True and False
3378 // when we start and stop incremental heap marking.
3379 // See RecordWriteStub::Patch for details.
3380
3381 // Clear the bit, branch on True for NOP action initially
3382 __ b(CC_NOP, &skip_to_incremental_noncompacting);
3383 __ b(CC_NOP, &skip_to_incremental_compacting);
3384
3385 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3386 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3387 MacroAssembler::kReturnAtEnd);
3388 }
3389 __ Ret();
3390
3391 __ bind(&skip_to_incremental_noncompacting);
3392 GenerateIncremental(masm, INCREMENTAL);
3393
3394 __ bind(&skip_to_incremental_compacting);
3395 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3396
3397 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3398 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3399 // patching not required on S390 as the initial path is effectively NOP
3400}
3401
3402void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3403 regs_.Save(masm);
3404
3405 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3406 Label dont_need_remembered_set;
3407
3408 __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
3409 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
3410 regs_.scratch0(), &dont_need_remembered_set);
3411
3412 __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
3413 &dont_need_remembered_set);
3414
3415 // First notify the incremental marker if necessary, then update the
3416 // remembered set.
3417 CheckNeedsToInformIncrementalMarker(
3418 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3419 InformIncrementalMarker(masm);
3420 regs_.Restore(masm);
3421 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3422 MacroAssembler::kReturnAtEnd);
3423
3424 __ bind(&dont_need_remembered_set);
3425 }
3426
3427 CheckNeedsToInformIncrementalMarker(
3428 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3429 InformIncrementalMarker(masm);
3430 regs_.Restore(masm);
3431 __ Ret();
3432}
3433
3434void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3435 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3436 int argument_count = 3;
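  // The record-write C function is called with (object, slot address, isolate),
  // set up in r2, r3 and r4 below.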
3437 __ PrepareCallCFunction(argument_count, regs_.scratch0());
3438 Register address =
3439 r2.is(regs_.address()) ? regs_.scratch0() : regs_.address();
3440 DCHECK(!address.is(regs_.object()));
3441 DCHECK(!address.is(r2));
3442 __ LoadRR(address, regs_.address());
3443 __ LoadRR(r2, regs_.object());
3444 __ LoadRR(r3, address);
3445 __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
3446
3447 AllowExternalCallThatCantCauseGC scope(masm);
3448 __ CallCFunction(
3449 ExternalReference::incremental_marking_record_write_function(isolate()),
3450 argument_count);
3451 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3452}
3453
3454void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3455 MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
3456 Mode mode) {
3457 Label on_black;
3458 Label need_incremental;
3459 Label need_incremental_pop_scratch;
3460
3461 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
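  // Clear the page-offset bits of the object address to reach its MemoryChunk
  // header, then decrement the write barrier counter stored there; once the
  // counter goes negative we have to inform the incremental marker.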
3462 __ AndP(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
3463 __ LoadP(
3464 regs_.scratch1(),
3465 MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
3466 __ SubP(regs_.scratch1(), regs_.scratch1(), Operand(1));
3467 __ StoreP(
3468 regs_.scratch1(),
3469 MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
3470 __ CmpP(regs_.scratch1(), Operand::Zero()); // S390, we could do better here
3471 __ blt(&need_incremental);
3472
3473 // Let's look at the color of the object: If it is not black we don't have
3474 // to inform the incremental marker.
3475 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
3476
3477 regs_.Restore(masm);
3478 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3479 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3480 MacroAssembler::kReturnAtEnd);
3481 } else {
3482 __ Ret();
3483 }
3484
3485 __ bind(&on_black);
3486
3487 // Get the value from the slot.
3488 __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
3489
3490 if (mode == INCREMENTAL_COMPACTION) {
3491 Label ensure_not_white;
3492
3493 __ CheckPageFlag(regs_.scratch0(), // Contains value.
3494 regs_.scratch1(), // Scratch.
3495 MemoryChunk::kEvacuationCandidateMask, eq,
3496 &ensure_not_white);
3497
3498 __ CheckPageFlag(regs_.object(),
3499 regs_.scratch1(), // Scratch.
3500 MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
3501 &need_incremental);
3502
3503 __ bind(&ensure_not_white);
3504 }
3505
3506 // We need extra registers for this, so we push the object and the address
3507 // register temporarily.
3508 __ Push(regs_.object(), regs_.address());
3509 __ JumpIfWhite(regs_.scratch0(), // The value.
3510 regs_.scratch1(), // Scratch.
3511 regs_.object(), // Scratch.
3512 regs_.address(), // Scratch.
3513 &need_incremental_pop_scratch);
3514 __ Pop(regs_.object(), regs_.address());
3515
3516 regs_.Restore(masm);
3517 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3518 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3519 MacroAssembler::kReturnAtEnd);
3520 } else {
3521 __ Ret();
3522 }
3523
3524 __ bind(&need_incremental_pop_scratch);
3525 __ Pop(regs_.object(), regs_.address());
3526
3527 __ bind(&need_incremental);
3528
3529 // Fall through when we need to inform the incremental marker.
3530}
3531
3532void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
3533 CEntryStub ces(isolate(), 1, kSaveFPRegs);
3534 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
3535 int parameter_count_offset =
3536 StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
3537 __ LoadP(r3, MemOperand(fp, parameter_count_offset));
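  // In JS_FUNCTION_STUB_MODE the recorded count presumably excludes the
  // receiver, so one extra slot is popped below.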
3538 if (function_mode() == JS_FUNCTION_STUB_MODE) {
3539 __ AddP(r3, Operand(1));
3540 }
3541 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
3542 __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
3543 __ la(sp, MemOperand(r3, sp));
3544 __ Ret();
3545}
3546
3547void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
3548 __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
3549 LoadICStub stub(isolate());
3550 stub.GenerateForTrampoline(masm);
3551}
3552
3553void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
3554 __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
3555 KeyedLoadICStub stub(isolate());
3556 stub.GenerateForTrampoline(masm);
3557}
3558
3559void CallICTrampolineStub::Generate(MacroAssembler* masm) {
3560 __ EmitLoadTypeFeedbackVector(r4);
3561 CallICStub stub(isolate(), state());
3562 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3563}
3564
3565void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
3566
3567void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
3568 GenerateImpl(masm, true);
3569}
3570
3571static void HandleArrayCases(MacroAssembler* masm, Register feedback,
3572 Register receiver_map, Register scratch1,
3573 Register scratch2, bool is_polymorphic,
3574 Label* miss) {
3575 // feedback initially contains the feedback array
3576 Label next_loop, prepare_next;
3577 Label start_polymorphic;
3578
3579 Register cached_map = scratch1;
3580
3581 __ LoadP(cached_map,
3582 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
3583 __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3584 __ CmpP(receiver_map, cached_map);
3585 __ bne(&start_polymorphic, Label::kNear);
3586 // found, now call handler.
3587 Register handler = feedback;
3588 __ LoadP(handler,
3589 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
3590 __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3591 __ Jump(ip);
3592
3593 Register length = scratch2;
3594 __ bind(&start_polymorphic);
3595 __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3596 if (!is_polymorphic) {
3597 // If the IC could be monomorphic we have to make sure we don't go past the
3598 // end of the feedback array.
3599 __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
3600 __ beq(miss);
3601 }
3602
3603 Register too_far = length;
3604 Register pointer_reg = feedback;
3605
3606 // +-----+------+------+-----+-----+ ... ----+
3607 // | map | len | wm0 | h0 | wm1 | hN |
3608 // +-----+------+------+-----+-----+ ... ----+
3609 // 0 1 2 len-1
3610 // ^ ^
3611 // | |
3612 // pointer_reg too_far
3613 // aka feedback scratch2
3614 // also need receiver_map
3615 // use cached_map (scratch1) to look in the weak map values.
3616 __ SmiToPtrArrayOffset(r0, length);
3617 __ AddP(too_far, feedback, r0);
3618 __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3619 __ AddP(pointer_reg, feedback,
3620 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
3621
3622 __ bind(&next_loop);
3623 __ LoadP(cached_map, MemOperand(pointer_reg));
3624 __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3625 __ CmpP(receiver_map, cached_map);
3626 __ bne(&prepare_next, Label::kNear);
3627 __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
3628 __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3629 __ Jump(ip);
3630
3631 __ bind(&prepare_next);
3632 __ AddP(pointer_reg, Operand(kPointerSize * 2));
3633 __ CmpP(pointer_reg, too_far);
3634 __ blt(&next_loop, Label::kNear);
3635
3636 // We exhausted our array of map handler pairs.
3637 __ b(miss);
3638}
3639
3640static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
3641 Register receiver_map, Register feedback,
3642 Register vector, Register slot,
3643 Register scratch, Label* compare_map,
3644 Label* load_smi_map, Label* try_array) {
3645 __ JumpIfSmi(receiver, load_smi_map);
3646 __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
3647 __ bind(compare_map);
3648 Register cached_map = scratch;
3649 // Move the weak map into the weak_cell register.
3650 __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
3651 __ CmpP(cached_map, receiver_map);
3652 __ bne(try_array);
3653 Register handler = feedback;
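  // The code handler is stored in the feedback vector slot immediately after
  // the weak cell, hence the extra kPointerSize in the load below.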
3654 __ SmiToPtrArrayOffset(r1, slot);
3655 __ LoadP(handler,
3656 FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
3657 __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3658 __ Jump(ip);
3659}
3660
3661void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3662 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
3663 Register name = LoadWithVectorDescriptor::NameRegister(); // r4
3664 Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
3665 Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
3666 Register feedback = r6;
3667 Register receiver_map = r7;
3668 Register scratch1 = r8;
3669
3670 __ SmiToPtrArrayOffset(r1, slot);
3671 __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
3672
3673 // Try to quickly handle the monomorphic case without knowing for sure
3674 // if we have a weak cell in feedback. We do know it's safe to look
3675 // at WeakCell::kValueOffset.
3676 Label try_array, load_smi_map, compare_map;
3677 Label not_array, miss;
3678 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3679 scratch1, &compare_map, &load_smi_map, &try_array);
3680
3681 // Is it a fixed array?
3682 __ bind(&try_array);
3683 __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3684 __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
3685 __ bne(&not_array, Label::kNear);
3686 HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
3687
3688 __ bind(&not_array);
3689 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
3690 __ bne(&miss);
3691 Code::Flags code_flags =
3692 Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
3693 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
3694 receiver, name, feedback,
3695 receiver_map, scratch1, r9);
3696
3697 __ bind(&miss);
3698 LoadIC::GenerateMiss(masm);
3699
3700 __ bind(&load_smi_map);
3701 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3702 __ b(&compare_map);
3703}
3704
3705void KeyedLoadICStub::Generate(MacroAssembler* masm) {
3706 GenerateImpl(masm, false);
3707}
3708
3709void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
3710 GenerateImpl(masm, true);
3711}
3712
3713void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3714 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
3715 Register key = LoadWithVectorDescriptor::NameRegister(); // r4
3716 Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
3717 Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
3718 Register feedback = r6;
3719 Register receiver_map = r7;
3720 Register scratch1 = r8;
3721
3722 __ SmiToPtrArrayOffset(r1, slot);
3723 __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
3724
3725 // Try to quickly handle the monomorphic case without knowing for sure
3726 // if we have a weak cell in feedback. We do know it's safe to look
3727 // at WeakCell::kValueOffset.
3728 Label try_array, load_smi_map, compare_map;
3729 Label not_array, miss;
3730 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3731 scratch1, &compare_map, &load_smi_map, &try_array);
3732
3733 __ bind(&try_array);
3734 // Is it a fixed array?
3735 __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3736 __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
3737 __ bne(&not_array);
3738
3739 // We have a polymorphic element handler.
3740 Label polymorphic, try_poly_name;
3741 __ bind(&polymorphic);
3742 HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
3743
3744 __ bind(&not_array);
3745 // Is it generic?
3746 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
3747 __ bne(&try_poly_name);
3748 Handle<Code> megamorphic_stub =
3749 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3750 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3751
3752 __ bind(&try_poly_name);
3753 // We might have a name in feedback, and a fixed array in the next slot.
3754 __ CmpP(key, feedback);
3755 __ bne(&miss);
3756 // If the name comparison succeeded, we know we have a fixed array with
3757 // at least one map/handler pair.
3758 __ SmiToPtrArrayOffset(r1, slot);
3759 __ LoadP(feedback,
3760 FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
3761 HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
3762
3763 __ bind(&miss);
3764 KeyedLoadIC::GenerateMiss(masm);
3765
3766 __ bind(&load_smi_map);
3767 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3768 __ b(&compare_map);
3769}
3770
3771void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3772 __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
3773 VectorStoreICStub stub(isolate(), state());
3774 stub.GenerateForTrampoline(masm);
3775}
3776
3777void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3778 __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
3779 VectorKeyedStoreICStub stub(isolate(), state());
3780 stub.GenerateForTrampoline(masm);
3781}
3782
3783void VectorStoreICStub::Generate(MacroAssembler* masm) {
3784 GenerateImpl(masm, false);
3785}
3786
3787void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3788 GenerateImpl(masm, true);
3789}
3790
3791void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3792 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r3
3793 Register key = VectorStoreICDescriptor::NameRegister(); // r4
3794 Register vector = VectorStoreICDescriptor::VectorRegister(); // r5
3795 Register slot = VectorStoreICDescriptor::SlotRegister(); // r6
3796 DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2)); // r2
3797 Register feedback = r7;
3798 Register receiver_map = r8;
3799 Register scratch1 = r9;
3800
3801 __ SmiToPtrArrayOffset(r0, slot);
3802 __ AddP(feedback, vector, r0);
3803 __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3804
3805 // Try to quickly handle the monomorphic case without knowing for sure
3806 // if we have a weak cell in feedback. We do know it's safe to look
3807 // at WeakCell::kValueOffset.
3808 Label try_array, load_smi_map, compare_map;
3809 Label not_array, miss;
3810 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3811 scratch1, &compare_map, &load_smi_map, &try_array);
3812
3813 // Is it a fixed array?
3814 __ bind(&try_array);
3815 __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3816 __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
3817 __ bne(&not_array);
3818
3819 Register scratch2 = ip;
3820 HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
3821 &miss);
3822
3823 __ bind(&not_array);
3824 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
3825 __ bne(&miss);
3826 Code::Flags code_flags =
3827 Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
3828 masm->isolate()->stub_cache()->GenerateProbe(
3829 masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
3830 scratch1, scratch2);
3831
3832 __ bind(&miss);
3833 StoreIC::GenerateMiss(masm);
3834
3835 __ bind(&load_smi_map);
3836 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3837 __ b(&compare_map);
3838}
3839
3840void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
3841 GenerateImpl(masm, false);
3842}
3843
3844void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3845 GenerateImpl(masm, true);
3846}
3847
3848static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
3849 Register receiver_map, Register scratch1,
3850 Register scratch2, Label* miss) {
3851 // feedback initially contains the feedback array
3852 Label next_loop, prepare_next;
3853 Label start_polymorphic;
3854 Label transition_call;
3855
3856 Register cached_map = scratch1;
3857 Register too_far = scratch2;
3858 Register pointer_reg = feedback;
3859 __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3860
3861 // +-----+------+------+-----+-----+-----+ ... ----+
3862 // | map | len | wm0 | wt0 | h0 | wm1 | hN |
3863 // +-----+------+------+-----+-----+ ----+ ... ----+
3864 // 0 1 2 len-1
3865 // ^ ^
3866 // | |
3867 // pointer_reg too_far
3868 // aka feedback scratch2
3869 // also need receiver_map
3870 // use cached_map (scratch1) to look in the weak map values.
3871 __ SmiToPtrArrayOffset(r0, too_far);
3872 __ AddP(too_far, feedback, r0);
3873 __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3874 __ AddP(pointer_reg, feedback,
3875 Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
3876
3877 __ bind(&next_loop);
3878 __ LoadP(cached_map, MemOperand(pointer_reg));
3879 __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3880 __ CmpP(receiver_map, cached_map);
3881 __ bne(&prepare_next);
3882 // Is it a transitioning store?
3883 __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
3884 __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
3885 __ bne(&transition_call);
3886 __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
3887 __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3888 __ Jump(ip);
3889
3890 __ bind(&transition_call);
3891 __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
3892 __ JumpIfSmi(too_far, miss);
3893
3894 __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
3895
3896 // Load the map into the correct register.
3897 DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
3898 __ LoadRR(feedback, too_far);
3899
3900 __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
3901 __ Jump(ip);
3902
3903 __ bind(&prepare_next);
3904 __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
3905 __ CmpLogicalP(pointer_reg, too_far);
3906 __ blt(&next_loop);
3907
3908 // We exhausted our array of map handler pairs.
3909 __ b(miss);
3910}
3911
3912void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3913 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r3
3914 Register key = VectorStoreICDescriptor::NameRegister(); // r4
3915 Register vector = VectorStoreICDescriptor::VectorRegister(); // r5
3916 Register slot = VectorStoreICDescriptor::SlotRegister(); // r6
3917 DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2)); // r2
3918 Register feedback = r7;
3919 Register receiver_map = r8;
3920 Register scratch1 = r9;
3921
3922 __ SmiToPtrArrayOffset(r0, slot);
3923 __ AddP(feedback, vector, r0);
3924 __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3925
3926 // Try to quickly handle the monomorphic case without knowing for sure
3927 // if we have a weak cell in feedback. We do know it's safe to look
3928 // at WeakCell::kValueOffset.
3929 Label try_array, load_smi_map, compare_map;
3930 Label not_array, miss;
3931 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3932 scratch1, &compare_map, &load_smi_map, &try_array);
3933
3934 __ bind(&try_array);
3935 // Is it a fixed array?
3936 __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3937 __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
3938 __ bne(&not_array);
3939
3940 // We have a polymorphic element handler.
3941 Label polymorphic, try_poly_name;
3942 __ bind(&polymorphic);
3943
3944 Register scratch2 = ip;
3945
3946 HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
3947 &miss);
3948
3949 __ bind(&not_array);
3950 // Is it generic?
3951 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
3952 __ bne(&try_poly_name);
3953 Handle<Code> megamorphic_stub =
3954 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3955 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3956
3957 __ bind(&try_poly_name);
3958 // We might have a name in feedback, and a fixed array in the next slot.
3959 __ CmpP(key, feedback);
3960 __ bne(&miss);
3961 // If the name comparison succeeded, we know we have a fixed array with
3962 // at least one map/handler pair.
3963 __ SmiToPtrArrayOffset(r0, slot);
3964 __ AddP(feedback, vector, r0);
3965 __ LoadP(feedback,
3966 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3967 HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
3968 &miss);
3969
3970 __ bind(&miss);
3971 KeyedStoreIC::GenerateMiss(masm);
3972
3973 __ bind(&load_smi_map);
3974 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3975 __ b(&compare_map);
3976}
3977
3978void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
3979 if (masm->isolate()->function_entry_hook() != NULL) {
3980 PredictableCodeSizeScope predictable(masm,
3981#if V8_TARGET_ARCH_S390X
3982 40);
3983#elif V8_HOST_ARCH_S390
3984 36);
3985#else
3986 32);
3987#endif
3988 ProfileEntryHookStub stub(masm->isolate());
3989 __ CleanseP(r14);
3990 __ Push(r14, ip);
3991 __ CallStub(&stub); // BRASL
3992 __ Pop(r14, ip);
3993 }
3994}
3995
3996void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
3997// The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call.
3998#if V8_TARGET_ARCH_S390X
3999 const int32_t kReturnAddressDistanceFromFunctionStart =
4000 Assembler::kCallTargetAddressOffset + 18; // LAY + STG * 2
4001#elif V8_HOST_ARCH_S390
4002 const int32_t kReturnAddressDistanceFromFunctionStart =
4003 Assembler::kCallTargetAddressOffset + 18; // NILH + LAY + ST * 2
4004#else
4005 const int32_t kReturnAddressDistanceFromFunctionStart =
4006 Assembler::kCallTargetAddressOffset + 14; // LAY + ST * 2
4007#endif
4008
4009 // This should contain all kJSCallerSaved registers.
4010 const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
4011 r7.bit(); // Saved stack pointer.
4012
4013 // We also save r14+ip, so count here is one higher than the mask indicates.
4014 const int32_t kNumSavedRegs = kNumJSCallerSaved + 3;
4015
4016 // Save all caller-save registers as this may be called from anywhere.
4017 __ CleanseP(r14);
4018 __ LoadRR(ip, r14);
4019 __ MultiPush(kSavedRegs | ip.bit());
4020
4021 // Compute the function's address for the first argument.
4022
4023 __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart));
4024
4025 // The caller's return address is two slots above the saved temporaries.
4026 // Grab that for the second argument to the hook.
4027 __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize));
4028
4029 // Align the stack if necessary.
4030 int frame_alignment = masm->ActivationFrameAlignment();
4031 if (frame_alignment > kPointerSize) {
4032 __ LoadRR(r7, sp);
4033 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4034 __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
4035 }
4036
4037#if !defined(USE_SIMULATOR)
4038 uintptr_t entry_hook =
4039 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
4040 __ mov(ip, Operand(entry_hook));
4041
4042#if ABI_USES_FUNCTION_DESCRIPTORS
4043 // Function descriptor
4044 __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
4045 __ LoadP(ip, MemOperand(ip, 0));
4046// ip already set.
4047#endif
4048#endif
4049
4050 // zLinux ABI requires caller's frame to have sufficient space for callee
4051 // preserved register save area.
4052 __ LoadImmP(r0, Operand::Zero());
4053 __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize -
4054 kNumRequiredStackFrameSlots * kPointerSize));
4055 __ StoreP(r0, MemOperand(sp));
4056#if defined(USE_SIMULATOR)
4057 // Under the simulator we need to indirect the entry hook through a
4058 // trampoline function at a known address.
4059 // It additionally takes an isolate as a third parameter
4060 __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
4061
4062 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4063 __ mov(ip, Operand(ExternalReference(
4064 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
4065#endif
4066 __ Call(ip);
4067
4068 // zLinux ABI requires caller's frame to have sufficient space for callee
4069 // preserved register save area.
4070 __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize +
4071 kNumRequiredStackFrameSlots * kPointerSize));
4072
4073 // Restore the stack pointer if needed.
4074 if (frame_alignment > kPointerSize) {
4075 __ LoadRR(sp, r7);
4076 }
4077
4078 // Also pop lr to get Ret(0).
4079 __ MultiPop(kSavedRegs | ip.bit());
4080 __ LoadRR(r14, ip);
4081 __ Ret();
4082}
4083
4084template <class T>
4085static void CreateArrayDispatch(MacroAssembler* masm,
4086 AllocationSiteOverrideMode mode) {
4087 if (mode == DISABLE_ALLOCATION_SITES) {
4088 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4089 __ TailCallStub(&stub);
4090 } else if (mode == DONT_OVERRIDE) {
4091 int last_index =
4092 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4093 for (int i = 0; i <= last_index; ++i) {
4094 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4095 __ CmpP(r5, Operand(kind));
4096 T stub(masm->isolate(), kind);
4097 __ TailCallStub(&stub, eq);
4098 }
4099
4100 // If we reached this point there is a problem.
4101 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4102 } else {
4103 UNREACHABLE();
4104 }
4105}
4106
4107static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4108 AllocationSiteOverrideMode mode) {
4109 // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4110 // r5 - kind (if mode != DISABLE_ALLOCATION_SITES)
4111 // r2 - number of arguments
4112 // r3 - constructor?
4113 // sp[0] - last argument
4114 Label normal_sequence;
4115 if (mode == DONT_OVERRIDE) {
4116 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4117 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4118 STATIC_ASSERT(FAST_ELEMENTS == 2);
4119 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4120 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4121 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4122
4123 // is the low bit set? If so, we are holey and that is good.
4124 __ AndP(r0, r5, Operand(1));
4125 __ bne(&normal_sequence);
4126 }
4127
4128 // look at the first argument
4129 __ LoadP(r7, MemOperand(sp, 0));
4130 __ CmpP(r7, Operand::Zero());
4131 __ beq(&normal_sequence);
4132
4133 if (mode == DISABLE_ALLOCATION_SITES) {
4134 ElementsKind initial = GetInitialFastElementsKind();
4135 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4136
4137 ArraySingleArgumentConstructorStub stub_holey(
4138 masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
4139 __ TailCallStub(&stub_holey);
4140
4141 __ bind(&normal_sequence);
4142 ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
4143 DISABLE_ALLOCATION_SITES);
4144 __ TailCallStub(&stub);
4145 } else if (mode == DONT_OVERRIDE) {
4146 // We are going to create a holey array, but our kind is non-holey.
4147 // Fix kind and retry (only if we have an allocation site in the slot).
4148 __ AddP(r5, r5, Operand(1));
4149 if (FLAG_debug_code) {
4150 __ LoadP(r7, FieldMemOperand(r4, 0));
4151 __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
4152 __ Assert(eq, kExpectedAllocationSite);
4153 }
4154
4155 // Save the resulting elements kind in type info. We can't just store r5
4156 // in the AllocationSite::transition_info field because elements kind is
4157 // restricted to a portion of the field...upper bits need to be left alone.
4158 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4159 __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
4160 __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
4161 __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
4162
4163 __ bind(&normal_sequence);
4164 int last_index =
4165 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4166 for (int i = 0; i <= last_index; ++i) {
4167 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4168 __ CmpP(r5, Operand(kind));
4169 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4170 __ TailCallStub(&stub, eq);
4171 }
4172
4173 // If we reached this point there is a problem.
4174 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4175 } else {
4176 UNREACHABLE();
4177 }
4178}
4179
4180template <class T>
4181static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4182 int to_index =
4183 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4184 for (int i = 0; i <= to_index; ++i) {
4185 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4186 T stub(isolate, kind);
4187 stub.GetCode();
4188 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4189 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4190 stub1.GetCode();
4191 }
4192 }
4193}
4194
4195void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
4196 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4197 isolate);
4198 ArrayNArgumentsConstructorStub stub(isolate);
4199 stub.GetCode();
4200 ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
4201 for (int i = 0; i < 2; i++) {
4202 // For internal arrays we only need a few things
4203 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4204 stubh1.GetCode();
4205 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4206 stubh2.GetCode();
4207 }
4208}
4209
4210void ArrayConstructorStub::GenerateDispatchToArrayStub(
4211 MacroAssembler* masm, AllocationSiteOverrideMode mode) {
4212 if (argument_count() == ANY) {
4213 Label not_zero_case, not_one_case;
4214 __ CmpP(r2, Operand::Zero());
4215 __ bne(&not_zero_case);
4216 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4217
4218 __ bind(&not_zero_case);
4219 __ CmpP(r2, Operand(1));
4220 __ bgt(&not_one_case);
4221 CreateArrayDispatchOneArgument(masm, mode);
4222
4223 __ bind(&not_one_case);
4224 ArrayNArgumentsConstructorStub stub(masm->isolate());
4225 __ TailCallStub(&stub);
4226 } else if (argument_count() == NONE) {
4227 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4228 } else if (argument_count() == ONE) {
4229 CreateArrayDispatchOneArgument(masm, mode);
4230 } else if (argument_count() == MORE_THAN_ONE) {
4231 ArrayNArgumentsConstructorStub stub(masm->isolate());
4232 __ TailCallStub(&stub);
4233 } else {
4234 UNREACHABLE();
4235 }
4236}
4237
4238void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4239 // ----------- S t a t e -------------
4240 // -- r2 : argc (only if argument_count() == ANY)
4241 // -- r3 : constructor
4242 // -- r4 : AllocationSite or undefined
4243 // -- r5 : new target
4244 // -- sp[0] : return address
4245 // -- sp[4] : last argument
4246 // -----------------------------------
4247
4248 if (FLAG_debug_code) {
4249 // The array construct code is only set for the global and natives
4250 // builtin Array functions which always have maps.
4251
4252 // Initial map for the builtin Array function should be a map.
4253 __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
4254 // Will both indicate a NULL and a Smi.
4255 __ TestIfSmi(r6);
4256 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
4257 __ CompareObjectType(r6, r6, r7, MAP_TYPE);
4258 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4259
4260 // We should either have undefined in r4 or a valid AllocationSite
4261 __ AssertUndefinedOrAllocationSite(r4, r6);
4262 }
4263
4264 // Enter the context of the Array function.
4265 __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
4266
4267 Label subclassing;
4268 __ CmpP(r5, r3);
4269 __ bne(&subclassing, Label::kNear);
4270
4271 Label no_info;
4272 // Get the elements kind and case on that.
4273 __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
4274 __ beq(&no_info);
4275
4276 __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
4277 __ SmiUntag(r5);
4278 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4279 __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
4280 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4281
4282 __ bind(&no_info);
4283 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4284
4285 __ bind(&subclassing);
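  // Subclassing: store the constructor into the slot above the arguments, push
  // the new target and the AllocationSite, and bump argc by 3 so that
  // Runtime::kNewArray receives all of them.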
4286 switch (argument_count()) {
4287 case ANY:
4288 case MORE_THAN_ONE:
4289 __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
4290 __ StoreP(r3, MemOperand(sp, r1));
4291 __ AddP(r2, r2, Operand(3));
4292 break;
4293 case NONE:
4294 __ StoreP(r3, MemOperand(sp, 0 * kPointerSize));
4295 __ LoadImmP(r2, Operand(3));
4296 break;
4297 case ONE:
4298 __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
4299 __ LoadImmP(r2, Operand(4));
4300 break;
4301 }
4302
4303 __ Push(r5, r4);
4304 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
4305}
4306
4307void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
4308 ElementsKind kind) {
4309 __ CmpLogicalP(r2, Operand(1));
4310
4311 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4312 __ TailCallStub(&stub0, lt);
4313
4314 ArrayNArgumentsConstructorStub stubN(isolate());
4315 __ TailCallStub(&stubN, gt);
4316
4317 if (IsFastPackedElementsKind(kind)) {
4318 // We might need to create a holey array
4319 // look at the first argument
4320 __ LoadP(r5, MemOperand(sp, 0));
4321 __ CmpP(r5, Operand::Zero());
4322
4323 InternalArraySingleArgumentConstructorStub stub1_holey(
4324 isolate(), GetHoleyElementsKind(kind));
4325 __ TailCallStub(&stub1_holey, ne);
4326 }
4327
4328 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4329 __ TailCallStub(&stub1);
4330}
4331
4332void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4333 // ----------- S t a t e -------------
4334 // -- r2 : argc
4335 // -- r3 : constructor
4336 // -- sp[0] : return address
4337 // -- sp[4] : last argument
4338 // -----------------------------------
4339
4340 if (FLAG_debug_code) {
4341 // The array construct code is only set for the global and natives
4342 // builtin Array functions which always have maps.
4343
4344 // Initial map for the builtin Array function should be a map.
4345 __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
4346 // Will both indicate a NULL and a Smi.
4347 __ TestIfSmi(r5);
4348 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
4349 __ CompareObjectType(r5, r5, r6, MAP_TYPE);
4350 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4351 }
4352
4353 // Figure out the right elements kind
4354 __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
4355 // Load the map's "bit field 2" into |result|.
4356 __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
4357 // Retrieve elements_kind from bit field 2.
4358 __ DecodeField<Map::ElementsKindBits>(r5);
4359
4360 if (FLAG_debug_code) {
4361 Label done;
4362 __ CmpP(r5, Operand(FAST_ELEMENTS));
4363 __ beq(&done);
4364 __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS));
4365 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4366 __ bind(&done);
4367 }
4368
4369 Label fast_elements_case;
4370 __ CmpP(r5, Operand(FAST_ELEMENTS));
4371 __ beq(&fast_elements_case);
4372 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4373
4374 __ bind(&fast_elements_case);
4375 GenerateCase(masm, FAST_ELEMENTS);
4376}
4377
4378void FastNewObjectStub::Generate(MacroAssembler* masm) {
4379 // ----------- S t a t e -------------
4380 // -- r3 : target
4381 // -- r5 : new target
4382 // -- cp : context
4383 // -- lr : return address
4384 // -----------------------------------
4385 __ AssertFunction(r3);
4386 __ AssertReceiver(r5);
4387
4388 // Verify that the new target is a JSFunction.
4389 Label new_object;
4390 __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
4391 __ bne(&new_object);
4392
4393 // Load the initial map and verify that it's in fact a map.
4394 __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
4395 __ JumpIfSmi(r4, &new_object);
4396 __ CompareObjectType(r4, r2, r2, MAP_TYPE);
4397 __ bne(&new_object);
4398
4399 // Fall back to runtime if the target differs from the new target's
4400 // initial map constructor.
4401 __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
4402 __ CmpP(r2, r3);
4403 __ bne(&new_object);
4404
4405 // Allocate the JSObject on the heap.
4406 Label allocate, done_allocate;
4407 __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
4408 __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
4409 __ bind(&done_allocate);
4410
4411 // Initialize the JSObject fields.
4412 __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
4413 __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
4414 __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
4415 __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
4416 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
4417 __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));
4418
4419 // ----------- S t a t e -------------
4420 // -- r2 : result (tagged)
4421 // -- r3 : result fields (untagged)
4422 // -- r7 : result end (untagged)
4423 // -- r4 : initial map
4424 // -- cp : context
4425 // -- lr : return address
4426 // -----------------------------------
4427
4428 // Perform in-object slack tracking if requested.
4429 Label slack_tracking;
4430 STATIC_ASSERT(Map::kNoSlackTracking == 0);
4431 __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
4432 __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
4433 __ DecodeField<Map::ConstructionCounter>(r9, r5);
4434 __ LoadAndTestP(r9, r9);
4435 __ bne(&slack_tracking);
4436 {
4437 // Initialize all in-object fields with undefined.
4438 __ InitializeFieldsWithFiller(r3, r7, r8);
4439
4440 __ Ret();
4441 }
4442 __ bind(&slack_tracking);
4443 {
4444 // Decrease generous allocation count.
4445 STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
4446 __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
4447 __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
4448
4449 // Initialize the in-object fields with undefined.
4450 __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
4451 __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
4452 __ SubP(r6, r7, r6);
4453 __ InitializeFieldsWithFiller(r3, r6, r8);
4454
4455 // Initialize the remaining (reserved) fields with one pointer filler map.
4456 __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
4457 __ InitializeFieldsWithFiller(r3, r7, r8);
4458
4459     // Check if we can finalize the instance size.
4460 __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
4461 __ Ret(ne);
4462
4463 // Finalize the instance size.
4464 {
4465 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4466 __ Push(r2, r4);
4467 __ CallRuntime(Runtime::kFinalizeInstanceSize);
4468 __ Pop(r2);
4469 }
4470 __ Ret();
4471 }
4472
4473 // Fall back to %AllocateInNewSpace.
4474 __ bind(&allocate);
4475 {
4476 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4477 STATIC_ASSERT(kSmiTag == 0);
4478 __ ShiftLeftP(r6, r6,
4479 Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
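    // r6 still holds the instance size in words (loaded from the map above);
    // the shift converts it to a byte count and Smi-tags it in one step,
    // which is the form Runtime::kAllocateInNewSpace expects.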
4480 __ Push(r4, r6);
4481 __ CallRuntime(Runtime::kAllocateInNewSpace);
4482 __ Pop(r4);
4483 }
4484   __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
4485   __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
4486   __ AddP(r7, r2, r7);
4487   __ SubP(r7, r7, Operand(kHeapObjectTag));
4488   __ b(&done_allocate);
4489
4490 // Fall back to %NewObject.
4491 __ bind(&new_object);
4492 __ Push(r3, r5);
4493 __ TailCallRuntime(Runtime::kNewObject);
4494}
4495
4496void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
4497 // ----------- S t a t e -------------
4498 // -- r3 : function
4499 // -- cp : context
4500 // -- fp : frame pointer
4501 // -- lr : return address
4502 // -----------------------------------
4503 __ AssertFunction(r3);
4504
4505   // Make r4 point to the JavaScript frame.
4506   __ LoadRR(r4, fp);
4507   if (skip_stub_frame()) {
4508     // For Ignition we need to skip the handler/stub frame to reach the
4509     // JavaScript frame for the function.
4510     __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
4511   }
4512   if (FLAG_debug_code) {
4513     Label ok;
4514     __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
4515     __ CmpP(ip, r3);
4516     __ beq(&ok, Label::kNear);
4517     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4518     __ bind(&ok);
4519   }
4520
4521 // Check if we have rest parameters (only possible if we have an
4522 // arguments adaptor frame below the function frame).
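  // An arguments adaptor frame only exists when the actual argument count
  // differs from the formal parameter count, so without one there cannot be
  // any extra (rest) arguments.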
4523 Label no_rest_parameters;
4524 __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
4525 __ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
4526 __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
4527 __ bne(&no_rest_parameters);
4528
4529 // Check if the arguments adaptor frame contains more arguments than
4530 // specified by the function's internal formal parameter count.
4531 Label rest_parameters;
4532 __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
4533   __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
4534   __ LoadW(
4535       r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
4536 #if V8_TARGET_ARCH_S390X
4537   __ SmiTag(r5);
4538 #endif
4539   __ SubP(r2, r2, r5);
4540   __ bgt(&rest_parameters);
4541
4542 // Return an empty rest parameter array.
4543 __ bind(&no_rest_parameters);
4544 {
4545 // ----------- S t a t e -------------
4546 // -- cp : context
4547 // -- lr : return address
4548 // -----------------------------------
4549
4550 // Allocate an empty rest parameter array.
4551 Label allocate, done_allocate;
4552     __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, NO_ALLOCATION_FLAGS);
4553     __ bind(&done_allocate);
4554
4555     // Set up the rest parameter array in r2.
4556 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
4557 __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0);
4558 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
4559 __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0);
4560 __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0);
4561 __ LoadImmP(r3, Operand::Zero());
4562 __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0);
4563 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4564 __ Ret();
4565
4566 // Fall back to %AllocateInNewSpace.
4567 __ bind(&allocate);
4568 {
4569 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4570 __ Push(Smi::FromInt(JSArray::kSize));
4571 __ CallRuntime(Runtime::kAllocateInNewSpace);
4572 }
4573 __ b(&done_allocate);
4574 }
4575
4576 __ bind(&rest_parameters);
4577 {
4578     // Compute the pointer to the first rest parameter (skipping the receiver).
4579 __ SmiToPtrArrayOffset(r8, r2);
4580 __ AddP(r4, r4, r8);
4581 __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
4582
4583 // ----------- S t a t e -------------
4584 // -- cp : context
4585 // -- r2 : number of rest parameters (tagged)
4586     //  -- r3 : function
4587     //  -- r4 : pointer just past first rest parameters
4588 // -- r8 : size of rest parameters
4589 // -- lr : return address
4590 // -----------------------------------
4591
4592 // Allocate space for the rest parameter array plus the backing store.
4593 Label allocate, done_allocate;
4594     __ mov(r9, Operand(JSArray::kSize + FixedArray::kHeaderSize));
4595 __ AddP(r9, r9, r8);
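    // r8 is the rest parameter area in bytes (argument count scaled by
    // kPointerSize), so r9 now holds the total size of the JSArray plus the
    // FixedArray backing store.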
4596 __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
4597     __ bind(&done_allocate);
4598
4599 // Setup the elements array in r5.
4600 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4601 __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
4602 __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
4603 __ AddP(r6, r5,
4604 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
4605 {
4606 Label loop;
4607 __ SmiUntag(r1, r2);
4609 __ bind(&loop);
4610 __ lay(r4, MemOperand(r4, -kPointerSize));
4611 __ LoadP(ip, MemOperand(r4));
4612 __ la(r6, MemOperand(r6, kPointerSize));
4613 __ StoreP(ip, MemOperand(r6));
4615 __ BranchOnCount(r1, &loop);
4616 __ AddP(r6, r6, Operand(kPointerSize));
4617 }
4618
4619 // Setup the rest parameter array in r6.
4620 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
4621 __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset));
4622 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
4623 __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset));
4624 __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset));
4625 __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset));
4626 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4627 __ AddP(r2, r6, Operand(kHeapObjectTag));
4628 __ Ret();
4629
4630     // Fall back to %AllocateInNewSpace (if not too big).
4631     Label too_big_for_new_space;
4632     __ bind(&allocate);
4633     __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
4634     __ bgt(&too_big_for_new_space);
4635     {
4636       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4637       __ SmiTag(r9);
4638       __ Push(r2, r4, r9);
4639       __ CallRuntime(Runtime::kAllocateInNewSpace);
4640       __ LoadRR(r5, r2);
4641       __ Pop(r2, r4);
4642     }
4643     __ b(&done_allocate);
4644
4645     // Fall back to %NewRestParameter.
4646     __ bind(&too_big_for_new_space);
4647     __ push(r3);
4648     __ TailCallRuntime(Runtime::kNewRestParameter);
4649   }
4650}
4651
4652void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
4653 // ----------- S t a t e -------------
4654 // -- r3 : function
4655 // -- cp : context
4656 // -- fp : frame pointer
4657 // -- lr : return address
4658 // -----------------------------------
4659 __ AssertFunction(r3);
4660
4661   // Make r9 point to the JavaScript frame.
4662 __ LoadRR(r9, fp);
4663 if (skip_stub_frame()) {
4664 // For Ignition we need to skip the handler/stub frame to reach the
4665 // JavaScript frame for the function.
4666 __ LoadP(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
4667 }
4668 if (FLAG_debug_code) {
4669 Label ok;
4670 __ LoadP(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
4671 __ CmpP(ip, r3);
4672 __ beq(&ok, Label::kNear);
4673 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4674 __ bind(&ok);
4675 }
4676
4677   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
4678 __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
4679 __ LoadW(
4680 r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
4681#if V8_TARGET_ARCH_S390X
4682 __ SmiTag(r4);
4683#endif
4684 __ SmiToPtrArrayOffset(r5, r4);
4685   __ AddP(r5, r9, r5);
4686   __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
4687
4688 // r3 : function
4689 // r4 : number of parameters (tagged)
4690 // r5 : parameters pointer
4691   // r9 : JavaScript frame pointer
4692   // Registers used over whole function:
4693 // r7 : arguments count (tagged)
4694 // r8 : mapped parameter count (tagged)
4695
4696 // Check if the calling frame is an arguments adaptor frame.
4697 Label adaptor_frame, try_allocate, runtime;
4698   __ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
4699   __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
4700 __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
4701 __ beq(&adaptor_frame);
4702
4703 // No adaptor, parameter count = argument count.
4704 __ LoadRR(r7, r4);
4705 __ LoadRR(r8, r4);
4706 __ b(&try_allocate);
4707
4708 // We have an adaptor frame. Patch the parameters pointer.
4709 __ bind(&adaptor_frame);
4710 __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
4711 __ SmiToPtrArrayOffset(r5, r7);
4712 __ AddP(r5, r5, r6);
4713 __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
4714
4715 // r7 = argument count (tagged)
4716   // r4 = parameter count (tagged)
4717 // Compute the mapped parameter count = min(r4, r7) in r8.
4718 __ CmpP(r4, r7);
4719 Label skip;
4720 __ LoadRR(r8, r4);
4721 __ blt(&skip);
4722 __ LoadRR(r8, r7);
4723 __ bind(&skip);
4724
4725 __ bind(&try_allocate);
4726
4727 // Compute the sizes of backing store, parameter map, and arguments object.
4728 // 1. Parameter map, has 2 extra words containing context and backing store.
4729 const int kParameterMapHeaderSize =
4730 FixedArray::kHeaderSize + 2 * kPointerSize;
4731 // If there are no mapped parameters, we do not need the parameter_map.
4732 __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
4733 Label skip2, skip3;
4734 __ bne(&skip2);
4735 __ LoadImmP(r1, Operand::Zero());
4736 __ b(&skip3);
4737 __ bind(&skip2);
4738 __ SmiToPtrArrayOffset(r1, r8);
4739 __ AddP(r1, r1, Operand(kParameterMapHeaderSize));
4740 __ bind(&skip3);
4741
4742 // 2. Backing store.
4743 __ SmiToPtrArrayOffset(r6, r7);
4744 __ AddP(r1, r1, r6);
4745 __ AddP(r1, r1, Operand(FixedArray::kHeaderSize));
4746
4747 // 3. Arguments object.
4748 __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
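  // r1 now holds the total allocation size in bytes:
  //   [parameter map header + mapped count slots, if any]
  //   + FixedArray header + argument count slots
  //   + JSSloppyArgumentsObject::kSize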
4749
4750 // Do the allocation of all three objects in one go.
4751   __ Allocate(r1, r2, r1, r6, &runtime, NO_ALLOCATION_FLAGS);
4752
4753 // r2 = address of new object(s) (tagged)
4754 // r4 = argument count (smi-tagged)
4755   // Get the arguments boilerplate from the current native context into r6.
4756 const int kNormalOffset =
4757 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
4758 const int kAliasedOffset =
4759 Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
4760
4761 __ LoadP(r6, NativeContextMemOperand());
4762 __ CmpP(r8, Operand::Zero());
4763 Label skip4, skip5;
4764 __ bne(&skip4);
4765 __ LoadP(r6, MemOperand(r6, kNormalOffset));
4766 __ b(&skip5);
4767 __ bind(&skip4);
4768 __ LoadP(r6, MemOperand(r6, kAliasedOffset));
4769 __ bind(&skip5);
4770
4771 // r2 = address of new object (tagged)
4772 // r4 = argument count (smi-tagged)
4773 // r6 = address of arguments map (tagged)
4774 // r8 = mapped parameter count (tagged)
4775 __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0);
4776 __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
4777 __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
4778 __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
4779
4780 // Set up the callee in-object property.
4781 __ AssertNotSmi(r3);
4782 __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset),
4783 r0);
4784
4785 // Use the length (smi tagged) and set that as an in-object property too.
4786 __ AssertSmi(r7);
4787 __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset),
4788 r0);
4789
4790 // Set up the elements pointer in the allocated arguments object.
4791 // If we allocated a parameter map, r6 will point there, otherwise
4792 // it will point to the backing store.
4793 __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize));
4794 __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
4795
4796 // r2 = address of new object (tagged)
4797 // r4 = argument count (tagged)
4798 // r6 = address of parameter map or backing store (tagged)
4799 // r8 = mapped parameter count (tagged)
4800 // Initialize parameter map. If there are no mapped arguments, we're done.
4801 Label skip_parameter_map;
4802 __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
4803 Label skip6;
4804 __ bne(&skip6);
4805 // Move backing store address to r3, because it is
4806 // expected there when filling in the unmapped arguments.
4807 __ LoadRR(r3, r6);
4808 __ b(&skip_parameter_map);
4809 __ bind(&skip6);
4810
4811 __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex);
4812 __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
4813 __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0);
4814 __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
4815 __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize),
4816 r0);
4817 __ SmiToPtrArrayOffset(r7, r8);
4818 __ AddP(r7, r7, r6);
4819 __ AddP(r7, r7, Operand(kParameterMapHeaderSize));
4820 __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize),
4821 r0);
4822
4823 // Copy the parameter slots and the holes in the arguments.
4824 // We need to fill in mapped_parameter_count slots. They index the context,
4825 // where parameters are stored in reverse order, at
4826 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4827   // The mapped parameters thus need to get indices
4828 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4829 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4830 // We loop from right to left.
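  // Illustrative example: with parameter_count == 3 and
  // mapped_parameter_count == 3, parameter 0 maps to context slot
  // MIN_CONTEXT_SLOTS + 2, parameter 1 to MIN_CONTEXT_SLOTS + 1 and
  // parameter 2 to MIN_CONTEXT_SLOTS + 0, while the corresponding backing
  // store slots are filled with the hole so lookups fall through to the
  // context.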
4831 Label parameters_loop;
4832 __ LoadRR(r7, r8);
4833 __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
4834 __ SubP(r1, r1, r8);
4835 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
4836 __ SmiToPtrArrayOffset(r3, r7);
4837 __ AddP(r3, r3, r6);
4838 __ AddP(r3, r3, Operand(kParameterMapHeaderSize));
4839
4840 // r3 = address of backing store (tagged)
4841 // r6 = address of parameter map (tagged)
4842   // r7 = temporary scratch (among others, for address calculation)
4843   // r9 = temporary scratch (among others, for address calculation)
4844 // ip = the hole value
4845 __ SmiUntag(r7);
4846 __ push(r4);
4847 __ LoadRR(r4, r7);
4848 __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
4849 __ AddP(r9, r3, r7);
4850 __ AddP(r7, r6, r7);
4851 __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4852 __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4853
4854 __ bind(&parameters_loop);
4855 __ StoreP(r1, MemOperand(r7, -kPointerSize));
4856 __ lay(r7, MemOperand(r7, -kPointerSize));
4857 __ StoreP(ip, MemOperand(r9, -kPointerSize));
4858 __ lay(r9, MemOperand(r9, -kPointerSize));
4859 __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
4860 __ BranchOnCount(r4, &parameters_loop);
4861 __ pop(r4);
4862
4863 // Restore r7 = argument count (tagged).
4864 __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset));
4865
4866 __ bind(&skip_parameter_map);
4867 // r2 = address of new object (tagged)
4868 // r3 = address of backing store (tagged)
4869 // r7 = argument count (tagged)
4870 // r8 = mapped parameter count (tagged)
4871 // r1 = scratch
4872 // Copy arguments header and remaining slots (if there are any).
4873 __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
4874 __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0);
4875 __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0);
4876 __ SubP(r1, r7, r8);
4877 __ Ret(eq);
4878
4879 Label arguments_loop;
4880 __ SmiUntag(r1);
4881 __ LoadRR(r4, r1);
4882
4883 __ SmiToPtrArrayOffset(r0, r8);
4884 __ SubP(r5, r5, r0);
4885 __ AddP(r1, r3, r0);
4886 __ AddP(r1, r1,
4887 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
4888
4889 __ bind(&arguments_loop);
4890 __ LoadP(r6, MemOperand(r5, -kPointerSize));
4891 __ lay(r5, MemOperand(r5, -kPointerSize));
4892 __ StoreP(r6, MemOperand(r1, kPointerSize));
4893 __ la(r1, MemOperand(r1, kPointerSize));
4894 __ BranchOnCount(r4, &arguments_loop);
4895
4896 // Return.
4897 __ Ret();
4898
4899 // Do the runtime call to allocate the arguments object.
4900 // r7 = argument count (tagged)
4901 __ bind(&runtime);
4902 __ Push(r3, r5, r7);
4903 __ TailCallRuntime(Runtime::kNewSloppyArguments);
4904}
4905
4906void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
4907 // ----------- S t a t e -------------
4908 // -- r3 : function
4909 // -- cp : context
4910 // -- fp : frame pointer
4911 // -- lr : return address
4912 // -----------------------------------
4913 __ AssertFunction(r3);
4914
4915   // Make r4 point to the JavaScript frame.
4916   __ LoadRR(r4, fp);
4917   if (skip_stub_frame()) {
4918     // For Ignition we need to skip the handler/stub frame to reach the
4919     // JavaScript frame for the function.
4920     __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
4921   }
4922   if (FLAG_debug_code) {
4923     Label ok;
4924     __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
4925     __ CmpP(ip, r3);
4926     __ beq(&ok, Label::kNear);
4927     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4928     __ bind(&ok);
4929   }
4930
4931 // Check if we have an arguments adaptor frame below the function frame.
4932 Label arguments_adaptor, arguments_done;
4933 __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
4934 __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
4935 __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
4936 __ beq(&arguments_adaptor);
4937 {
4938     __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
4939     __ LoadW(r2, FieldMemOperand(
4940                      r6, SharedFunctionInfo::kFormalParameterCountOffset));
4941 #if V8_TARGET_ARCH_S390X
4942 __ SmiTag(r2);
4943#endif
4944 __ SmiToPtrArrayOffset(r8, r2);
4945 __ AddP(r4, r4, r8);
4946 }
4947 __ b(&arguments_done);
4948 __ bind(&arguments_adaptor);
4949 {
4950 __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
4951 __ SmiToPtrArrayOffset(r8, r2);
4952 __ AddP(r4, r5, r8);
4953 }
4954 __ bind(&arguments_done);
4955 __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
4956
4957 // ----------- S t a t e -------------
4958 // -- cp : context
4959   //  -- r2 : number of arguments (tagged)
4960   //  -- r3 : function
4961   //  -- r4 : pointer just past first argument
4962   //  -- r8 : size of arguments
4963 // -- lr : return address
4964 // -----------------------------------
4965
4966 // Allocate space for the strict arguments object plus the backing store.
4967 Label allocate, done_allocate;
4968   __ mov(r9, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
4969   __ AddP(r9, r9, r8);
4970   __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
4971   __ bind(&done_allocate);
4972
4973 // Setup the elements array in r5.
4974 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4975 __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
4976 __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
4977 __ AddP(r6, r5,
4978 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
4979 {
4980 Label loop, done_loop;
4981 __ SmiUntag(r1, r2);
4982 __ LoadAndTestP(r1, r1);
4983 __ beq(&done_loop);
4984 __ bind(&loop);
4985 __ lay(r4, MemOperand(r4, -kPointerSize));
4986 __ LoadP(ip, MemOperand(r4));
4987 __ la(r6, MemOperand(r6, kPointerSize));
4988 __ StoreP(ip, MemOperand(r6));
4989 __ BranchOnCount(r1, &loop);
4990 __ bind(&done_loop);
4991 __ AddP(r6, r6, Operand(kPointerSize));
4992 }
4993
4994   // Set up the strict arguments object in r6.
4995 __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3);
4996 __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset));
4997 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
4998 __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset));
4999 __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset));
5000 __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset));
5001 STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
5002 __ AddP(r2, r6, Operand(kHeapObjectTag));
5003 __ Ret();
5004
5005   // Fall back to %AllocateInNewSpace (if not too big).
5006   Label too_big_for_new_space;
5007   __ bind(&allocate);
5008   __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
5009   __ bgt(&too_big_for_new_space);
5010   {
5011     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
5012     __ SmiTag(r9);
5013     __ Push(r2, r4, r9);
5014     __ CallRuntime(Runtime::kAllocateInNewSpace);
5015     __ LoadRR(r5, r2);
5016     __ Pop(r2, r4);
5017   }
5018   __ b(&done_allocate);
5019
5020   // Fall back to %NewStrictArguments.
5021   __ bind(&too_big_for_new_space);
5022   __ push(r3);
5023   __ TailCallRuntime(Runtime::kNewStrictArguments);
5024 }
5025
5026void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
5027 Register value = r2;
5028 Register slot = r4;
5029
5030 Register cell = r3;
5031 Register cell_details = r5;
5032 Register cell_value = r6;
5033 Register cell_value_map = r7;
5034 Register scratch = r8;
5035
5036 Register context = cp;
5037 Register context_temp = cell;
5038
5039 Label fast_heapobject_case, fast_smi_case, slow_case;
5040
5041 if (FLAG_debug_code) {
5042 __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
5043 __ Check(ne, kUnexpectedValue);
5044 }
5045
5046 // Go up the context chain to the script context.
5047 for (int i = 0; i < depth(); i++) {
5048 __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
5049 context = context_temp;
5050 }
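  // depth() is a compile-time property of this stub, so the loop above is
  // fully unrolled into depth() context loads when the stub is generated.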
5051
5052 // Load the PropertyCell at the specified slot.
5053 __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
5054 __ AddP(cell, context, r0);
5055 __ LoadP(cell, ContextMemOperand(cell));
5056
5057   // Load PropertyDetails for the cell (only cell_type, kind and read-only bit).
5058 __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
5059 __ SmiUntag(cell_details);
5060 __ AndP(cell_details, cell_details,
5061 Operand(PropertyDetails::PropertyCellTypeField::kMask |
5062 PropertyDetails::KindField::kMask |
5063 PropertyDetails::kAttributesReadOnlyMask));
5064
5065 // Check if PropertyCell holds mutable data.
5066 Label not_mutable_data;
5067 __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5068 PropertyCellType::kMutable) |
5069 PropertyDetails::KindField::encode(kData)));
5070 __ bne(&not_mutable_data);
5071 __ JumpIfSmi(value, &fast_smi_case);
5072
5073 __ bind(&fast_heapobject_case);
5074 __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
5075 // RecordWriteField clobbers the value register, so we copy it before the
5076 // call.
5077 __ LoadRR(r5, value);
5078 __ RecordWriteField(cell, PropertyCell::kValueOffset, r5, scratch,
5079 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
5080 OMIT_SMI_CHECK);
5081 __ Ret();
5082
5083 __ bind(&not_mutable_data);
5084 // Check if PropertyCell value matches the new value (relevant for Constant,
5085 // ConstantType and Undefined cells).
5086 Label not_same_value;
5087 __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5088 __ CmpP(cell_value, value);
5089 __ bne(&not_same_value);
5090
5091 // Make sure the PropertyCell is not marked READ_ONLY.
5092 __ AndP(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
5093 __ bne(&slow_case);
5094
5095 if (FLAG_debug_code) {
5096 Label done;
5097 // This can only be true for Constant, ConstantType and Undefined cells,
5098 // because we never store the_hole via this stub.
5099 __ CmpP(cell_details,
5100 Operand(PropertyDetails::PropertyCellTypeField::encode(
5101 PropertyCellType::kConstant) |
5102 PropertyDetails::KindField::encode(kData)));
5103 __ beq(&done);
5104 __ CmpP(cell_details,
5105 Operand(PropertyDetails::PropertyCellTypeField::encode(
5106 PropertyCellType::kConstantType) |
5107 PropertyDetails::KindField::encode(kData)));
5108 __ beq(&done);
5109 __ CmpP(cell_details,
5110 Operand(PropertyDetails::PropertyCellTypeField::encode(
5111 PropertyCellType::kUndefined) |
5112 PropertyDetails::KindField::encode(kData)));
5113 __ Check(eq, kUnexpectedValue);
5114 __ bind(&done);
5115 }
5116 __ Ret();
5117 __ bind(&not_same_value);
5118
5119 // Check if PropertyCell contains data with constant type (and is not
5120 // READ_ONLY).
5121 __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5122 PropertyCellType::kConstantType) |
5123 PropertyDetails::KindField::encode(kData)));
5124 __ bne(&slow_case);
5125
5126 // Now either both old and new values must be smis or both must be heap
5127 // objects with same map.
5128 Label value_is_heap_object;
5129 __ JumpIfNotSmi(value, &value_is_heap_object);
5130 __ JumpIfNotSmi(cell_value, &slow_case);
5131 // Old and new values are smis, no need for a write barrier here.
5132 __ bind(&fast_smi_case);
5133 __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
5134 __ Ret();
5135
5136 __ bind(&value_is_heap_object);
5137 __ JumpIfSmi(cell_value, &slow_case);
5138
5139 __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
5140 __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5141 __ CmpP(cell_value_map, scratch);
5142 __ beq(&fast_heapobject_case);
5143
5144 // Fallback to runtime.
5145 __ bind(&slow_case);
5146 __ SmiTag(slot);
5147 __ Push(slot, value);
5148 __ TailCallRuntime(is_strict(language_mode())
5149 ? Runtime::kStoreGlobalViaContext_Strict
5150 : Runtime::kStoreGlobalViaContext_Sloppy);
5151}
5152
5153static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5154 return ref0.address() - ref1.address();
5155}
5156
5157// Calls an API function. Allocates HandleScope, extracts returned value
5158// from handle and propagates exceptions. Restores context. stack_space
5159// - space to be unwound on exit (includes the call JS arguments space and
5160// the additional space allocated for the fast call).
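// Roughly: open a HandleScope, invoke the callback (directly, or through the
// profiling thunk when the profiler is active), read the ReturnValue slot,
// close the HandleScope (calling into the runtime if its limit moved), leave
// the exit frame, and finally re-throw any scheduled exception.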
5161static void CallApiFunctionAndReturn(MacroAssembler* masm,
5162 Register function_address,
5163 ExternalReference thunk_ref,
5164 int stack_space,
5165 MemOperand* stack_space_operand,
5166 MemOperand return_value_operand,
5167 MemOperand* context_restore_operand) {
5168 Isolate* isolate = masm->isolate();
5169 ExternalReference next_address =
5170 ExternalReference::handle_scope_next_address(isolate);
5171 const int kNextOffset = 0;
5172 const int kLimitOffset = AddressOffset(
5173 ExternalReference::handle_scope_limit_address(isolate), next_address);
5174 const int kLevelOffset = AddressOffset(
5175 ExternalReference::handle_scope_level_address(isolate), next_address);
5176
5177 // Additional parameter is the address of the actual callback.
5178 DCHECK(function_address.is(r3) || function_address.is(r4));
5179 Register scratch = r5;
5180
5181 __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
5182 __ LoadlB(scratch, MemOperand(scratch, 0));
5183 __ CmpP(scratch, Operand::Zero());
5184
5185 Label profiler_disabled;
5186 Label end_profiler_check;
5187 __ beq(&profiler_disabled, Label::kNear);
5188 __ mov(scratch, Operand(thunk_ref));
5189 __ b(&end_profiler_check, Label::kNear);
5190 __ bind(&profiler_disabled);
5191 __ LoadRR(scratch, function_address);
5192 __ bind(&end_profiler_check);
5193
5194 // Allocate HandleScope in callee-save registers.
5195 // r9 - next_address
5196 // r6 - next_address->kNextOffset
5197 // r7 - next_address->kLimitOffset
5198 // r8 - next_address->kLevelOffset
5199 __ mov(r9, Operand(next_address));
5200 __ LoadP(r6, MemOperand(r9, kNextOffset));
5201 __ LoadP(r7, MemOperand(r9, kLimitOffset));
5202 __ LoadlW(r8, MemOperand(r9, kLevelOffset));
5203 __ AddP(r8, Operand(1));
5204 __ StoreW(r8, MemOperand(r9, kLevelOffset));
5205
5206 if (FLAG_log_timer_events) {
5207 FrameScope frame(masm, StackFrame::MANUAL);
5208 __ PushSafepointRegisters();
5209 __ PrepareCallCFunction(1, r2);
5210 __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
5211 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5212 1);
5213 __ PopSafepointRegisters();
5214 }
5215
5216 // Native call returns to the DirectCEntry stub which redirects to the
5217 // return address pushed on stack (could have moved after GC).
5218 // DirectCEntry stub itself is generated early and never moves.
5219 DirectCEntryStub stub(isolate);
5220 stub.GenerateCall(masm, scratch);
5221
5222 if (FLAG_log_timer_events) {
5223 FrameScope frame(masm, StackFrame::MANUAL);
5224 __ PushSafepointRegisters();
5225 __ PrepareCallCFunction(1, r2);
5226 __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
5227 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5228 1);
5229 __ PopSafepointRegisters();
5230 }
5231
5232 Label promote_scheduled_exception;
5233 Label delete_allocated_handles;
5234 Label leave_exit_frame;
5235 Label return_value_loaded;
5236
5237 // load value from ReturnValue
5238 __ LoadP(r2, return_value_operand);
5239 __ bind(&return_value_loaded);
5240 // No more valid handles (the result handle was the last one). Restore
5241 // previous handle scope.
5242 __ StoreP(r6, MemOperand(r9, kNextOffset));
5243 if (__ emit_debug_code()) {
5244 __ LoadlW(r3, MemOperand(r9, kLevelOffset));
5245 __ CmpP(r3, r8);
5246 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
5247 }
5248 __ SubP(r8, Operand(1));
5249 __ StoreW(r8, MemOperand(r9, kLevelOffset));
5250 __ CmpP(r7, MemOperand(r9, kLimitOffset));
5251 __ bne(&delete_allocated_handles, Label::kNear);
5252
5253 // Leave the API exit frame.
5254 __ bind(&leave_exit_frame);
5255 bool restore_context = context_restore_operand != NULL;
5256 if (restore_context) {
5257 __ LoadP(cp, *context_restore_operand);
5258 }
5259 // LeaveExitFrame expects unwind space to be in a register.
5260 if (stack_space_operand != NULL) {
5261 __ l(r6, *stack_space_operand);
5262 } else {
5263 __ mov(r6, Operand(stack_space));
5264 }
5265 __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL);
5266
5267 // Check if the function scheduled an exception.
5268 __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
5269 __ LoadP(r7, MemOperand(r7));
5270 __ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
5271 __ bne(&promote_scheduled_exception, Label::kNear);
5272
5273 __ b(r14);
5274
5275 // Re-throw by promoting a scheduled exception.
5276 __ bind(&promote_scheduled_exception);
5277 __ TailCallRuntime(Runtime::kPromoteScheduledException);
5278
5279 // HandleScope limit has changed. Delete allocated extensions.
5280 __ bind(&delete_allocated_handles);
5281 __ StoreP(r7, MemOperand(r9, kLimitOffset));
5282 __ LoadRR(r6, r2);
5283 __ PrepareCallCFunction(1, r7);
5284 __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
5285 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5286 1);
5287 __ LoadRR(r2, r6);
5288 __ b(&leave_exit_frame, Label::kNear);
5289}
5290
5291void CallApiCallbackStub::Generate(MacroAssembler* masm) {
5292 // ----------- S t a t e -------------
5293 // -- r2 : callee
5294 // -- r6 : call_data
5295 // -- r4 : holder
5296 // -- r3 : api_function_address
5297 // -- cp : context
5298 // --
5299 // -- sp[0] : last argument
5300 // -- ...
5301 // -- sp[(argc - 1)* 4] : first argument
5302 // -- sp[argc * 4] : receiver
5303 // -----------------------------------
5304
5305 Register callee = r2;
5306 Register call_data = r6;
5307 Register holder = r4;
5308 Register api_function_address = r3;
5309 Register context = cp;
5310
5311 typedef FunctionCallbackArguments FCA;
5312
5313 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5314 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5315 STATIC_ASSERT(FCA::kDataIndex == 4);
5316 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5317 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5318 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5319 STATIC_ASSERT(FCA::kHolderIndex == 0);
5320   STATIC_ASSERT(FCA::kNewTargetIndex == 7);
5321 STATIC_ASSERT(FCA::kArgsLength == 8);
5322
5323 // new target
5324 __ PushRoot(Heap::kUndefinedValueRootIndex);
5325
5326 // context save
5327 __ push(context);
5328 if (!is_lazy()) {
5329 // load context from callee
5330 __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5331 }
5332
5333 // callee
5334 __ push(callee);
5335
5336 // call data
5337 __ push(call_data);
5338
5339 Register scratch = call_data;
5340 if (!call_data_undefined()) {
5341 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5342 }
5343 // return value
5344 __ push(scratch);
5345 // return value default
5346 __ push(scratch);
5347 // isolate
5348 __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5349 __ push(scratch);
5350 // holder
5351 __ push(holder);
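  // The pushes above have laid out FunctionCallbackArguments in reverse index
  // order: sp[0] = holder, sp[1] = isolate, sp[2] = return value default,
  // sp[3] = return value, sp[4] = call data, sp[5] = callee,
  // sp[6] = context save, sp[7] = new target (undefined).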
5352
5353 // Prepare arguments.
5354 __ LoadRR(scratch, sp);
5355
5356 // Allocate the v8::Arguments structure in the arguments' space since
5357 // it's not controlled by GC.
5358 // S390 LINUX ABI:
5359 //
5360   // Create 4 extra slots on stack:
5361   //    [0] space for DirectCEntryStub's LR save
5362   //    [1-3] FunctionCallbackInfo
5363   const int kApiStackSpace = 4;
5364   const int kFunctionCallbackInfoOffset =
5365 (kStackFrameExtraParamSlot + 1) * kPointerSize;
5366
5367 FrameScope frame_scope(masm, StackFrame::MANUAL);
5368 __ EnterExitFrame(false, kApiStackSpace);
5369
5370 DCHECK(!api_function_address.is(r2) && !scratch.is(r2));
5371 // r2 = FunctionCallbackInfo&
5372 // Arguments is after the return address.
5373 __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
5374 // FunctionCallbackInfo::implicit_args_
5375 __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize));
5376 // FunctionCallbackInfo::values_
5377 __ AddP(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
5378 __ StoreP(ip, MemOperand(r2, 1 * kPointerSize));
5379 // FunctionCallbackInfo::length_ = argc
5380 __ LoadImmP(ip, Operand(argc()));
5381 __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
5382
5383 ExternalReference thunk_ref =
5384 ExternalReference::invoke_function_callback(masm->isolate());
5385
5386 AllowExternalCallThatCantCauseGC scope(masm);
5387 MemOperand context_restore_operand(
5388 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5389 // Stores return the first js argument
5390 int return_value_offset = 0;
5391 if (is_store()) {
5392 return_value_offset = 2 + FCA::kArgsLength;
5393 } else {
5394 return_value_offset = 2 + FCA::kReturnValueOffset;
5395 }
5396 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5397 int stack_space = 0;
5398   MemOperand length_operand =
5399       MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
5400   MemOperand* stack_space_operand = &length_operand;
5401   stack_space = argc() + FCA::kArgsLength + 1;
5402 stack_space_operand = NULL;
5403 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5404 stack_space_operand, return_value_operand,
5405 &context_restore_operand);
5406}
5407
5408void CallApiGetterStub::Generate(MacroAssembler* masm) {
5409   int arg0Slot = 0;
5410 int accessorInfoSlot = 0;
5411 int apiStackSpace = 0;
5412   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
5413 // name below the exit frame to make GC aware of them.
5414 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
5415 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
5416 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
5417 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
5418 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
5419 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
5420 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
5421 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
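  // The pushes below build this args_ array on the stack from index 6 (the
  // receiver) down to index 0 (should_throw_on_error), with the property name
  // handle pushed last so that it sits just below the array.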
5422
5423 Register receiver = ApiGetterDescriptor::ReceiverRegister();
5424 Register holder = ApiGetterDescriptor::HolderRegister();
5425 Register callback = ApiGetterDescriptor::CallbackRegister();
5426 Register scratch = r6;
5427 DCHECK(!AreAliased(receiver, holder, callback, scratch));
5428
5429 Register api_function_address = r4;
5430
5431 __ push(receiver);
5432 // Push data from AccessorInfo.
5433 __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
5434 __ push(scratch);
5435 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5436 __ Push(scratch, scratch);
5437 __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
5438 __ Push(scratch, holder);
5439 __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
5440 __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
5441 __ push(scratch);
5442
5443 // v8::PropertyCallbackInfo::args_ array and name handle.
5444 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5445
5446   // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
5447 __ LoadRR(r2, sp); // r2 = Handle<Name>
5448 __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_
5449
5450 // If ABI passes Handles (pointer-sized struct) in a register:
5451 //
5452 // Create 2 extra slots on stack:
5453 // [0] space for DirectCEntryStub's LR save
5454 // [1] AccessorInfo&
5455 //
5456 // Otherwise:
5457 //
5458 // Create 3 extra slots on stack:
5459 // [0] space for DirectCEntryStub's LR save
5460 // [1] copy of Handle (first arg)
5461 // [2] AccessorInfo&
5462 if (ABI_PASSES_HANDLES_IN_REGS) {
5463 accessorInfoSlot = kStackFrameExtraParamSlot + 1;
5464 apiStackSpace = 2;
5465 } else {
5466 arg0Slot = kStackFrameExtraParamSlot + 1;
5467 accessorInfoSlot = arg0Slot + 1;
5468 apiStackSpace = 3;
5469 }
5470
5471 FrameScope frame_scope(masm, StackFrame::MANUAL);
5472 __ EnterExitFrame(false, apiStackSpace);
5473
5474 if (!ABI_PASSES_HANDLES_IN_REGS) {
5475 // pass 1st arg by reference
5476 __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
5477 __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
5478 }
5479
5480 // Create v8::PropertyCallbackInfo object on the stack and initialize
5481   // its args_ field.
5482 __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
5483 __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
5484 // r3 = v8::PropertyCallbackInfo&
5485
5486 ExternalReference thunk_ref =
5487 ExternalReference::invoke_accessor_getter_callback(isolate());
5488
5489   __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
5490   __ LoadP(api_function_address,
5491            FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
5492
5493   // +3 is to skip prolog, return address and name handle.
5494 MemOperand return_value_operand(
5495 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
5496 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5497 kStackUnwindSpace, NULL, return_value_operand, NULL);
5498}
5499
5500#undef __
5501
5502} // namespace internal
5503} // namespace v8
5504
5505#endif // V8_TARGET_ARCH_S390