// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"

namespace v8 {
namespace internal {


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // cp: context
  // x1: function
  // x2: allocation site with elements kind
  // x0: number of arguments to the constructor function
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           x0.Is(descriptor.GetRegisterParameter(param_count - 1)));

    // Push arguments
    MacroAssembler::PushPopQueue queue(masm);
    for (int i = 0; i < param_count; ++i) {
      queue.Queue(descriptor.GetRegisterParameter(i));
    }
    queue.PushQueued();

    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label done;
  Register input = source();
  Register result = destination();
  DCHECK(is_truncating());

  DCHECK(result.Is64Bits());
  DCHECK(jssp.Is(masm->StackPointer()));

  int double_offset = offset();

  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
  Register scratch2 =
      GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);

  __ Push(scratch1, scratch2);
  // Account for saved regs if input is jssp.
  if (input.is(jssp)) double_offset += 2 * kPointerSize;

  if (!skip_fastpath()) {
    __ Push(double_scratch);
    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
    __ Ldr(double_scratch, MemOperand(input, double_offset));
    // Try to convert with a FPU convert instruction. This handles all
    // non-saturating cases.
    __ TryConvertDoubleToInt64(result, double_scratch, &done);
    __ Fmov(result, double_scratch);
  } else {
    __ Ldr(result, MemOperand(input, double_offset));
  }

  // If we reach here we need to manually convert the input to an int32.

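  // A rough C sketch of the slow path below (illustrative only, not generated
  // code; double_bits is a hypothetical helper returning the raw IEEE-754
  // bits of the loaded double):
  //
  //   uint64_t bits = double_bits(value);
  //   int biased_exponent = (bits >> 52) & 0x7ff;        // kMantissaBits == 52
  //   if (biased_exponent >= 1023 + 52 + 32) return 0;   // shifted out of int32
  //   uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  //   if (bits >> 63) mantissa = -mantissa;              // apply the sign bit
  //   return mantissa << (biased_exponent - 1023 - 52);  // caller keeps low 32 bits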
  // Extract the exponent.
  Register exponent = scratch1;
  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
          HeapNumber::kExponentBits);

  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
  // the mantissa gets shifted completely out of the int32_t result.
  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
  __ CzeroX(result, ge);
  __ B(ge, &done);

  // The Fcvtzs sequence handles all cases except where the conversion causes
  // signed overflow in the int64_t target. Since we've already handled
  // exponents >= 84, we can guarantee that 63 <= exponent < 84.

  if (masm->emit_debug_code()) {
    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
    // Exponents less than this should have been handled by the Fcvt case.
    __ Check(ge, kUnexpectedValue);
  }

  // Isolate the mantissa bits, and set the implicit '1'.
  Register mantissa = scratch2;
  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);

  // Negate the mantissa if necessary.
  __ Tst(result, kXSignMask);
  __ Cneg(mantissa, mantissa, ne);

  // Shift the mantissa bits into the correct place. We know that we have to
  // shift it left here, because exponent >= 63 >= kMantissaBits.
  __ Sub(exponent, exponent,
         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
  __ Lsl(result, mantissa, exponent);

  __ Bind(&done);
  if (!skip_fastpath()) {
    __ Pop(double_scratch);
  }
  __ Pop(scratch2, scratch1);
  __ Ret();
}


// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
                                          Register right, Register scratch,
                                          FPRegister double_scratch,
                                          Label* slow, Condition cond) {
  DCHECK(!AreAliased(left, right, scratch));
  Label not_identical, return_equal, heap_number;
  Register result = x0;

  __ Cmp(right, left);
  __ B(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are identical and not both Smis, so neither of them is a
  // Smi. If it's not a heap number, then return equal.
  Register right_type = scratch;
  if ((cond == lt) || (cond == gt)) {
    // Call runtime on identical JSObjects. Otherwise return equal.
    __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
                        slow, ge);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Cmp(right_type, SYMBOL_TYPE);
    __ B(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Cmp(right_type, SIMD128_VALUE_TYPE);
    __ B(eq, slow);
  } else if (cond == eq) {
    __ JumpIfHeapNumber(right, &heap_number);
  } else {
    __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
                        &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
    __ B(ge, slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Cmp(right_type, SYMBOL_TYPE);
    __ B(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Cmp(right_type, SIMD128_VALUE_TYPE);
    __ B(eq, slow);
    // Normally here we fall through to return_equal, but undefined is
    // special: (undefined == undefined) == true, but
    // (undefined <= undefined) == false! See ECMAScript 11.8.5.
    if ((cond == le) || (cond == ge)) {
      __ Cmp(right_type, ODDBALL_TYPE);
      __ B(ne, &return_equal);
      __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
      if (cond == le) {
        // undefined <= undefined should fail.
        __ Mov(result, GREATER);
      } else {
        // undefined >= undefined should fail.
        __ Mov(result, LESS);
      }
      __ Ret();
    }
  }

  __ Bind(&return_equal);
  if (cond == lt) {
    __ Mov(result, GREATER);  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ Mov(result, LESS);     // Things aren't greater than themselves.
  } else {
    __ Mov(result, EQUAL);    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // Cases lt and gt have been handled earlier, and case ne is never seen, as
  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
  // only concerned with cases ge, le and eq here.
  if ((cond != lt) && (cond != gt)) {
    DCHECK((cond == ge) || (cond == le) || (cond == eq));
    __ Bind(&heap_number);
    // Left and right are identical pointers to a heap number object. Return
    // non-equal if the heap number is a NaN, and equal otherwise. Comparing
    // the number to itself will set the overflow flag iff the number is NaN.
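    // (On ARM64, Fcmp of an unordered pair - i.e. at least one NaN - sets the
    // flags to nzCV, so the V flag is set only in the NaN case and the 'vc'
    // branch below is taken exactly for ordinary numbers.)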
    __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
    __ Fcmp(double_scratch, double_scratch);
    __ B(vc, &return_equal);  // Not NaN, so treat as normal heap number.

    if (cond == le) {
      __ Mov(result, GREATER);
    } else {
      __ Mov(result, LESS);
    }
    __ Ret();
  }

  // No fall through here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&not_identical);
}


// See call site for description.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register left,
                                           Register right,
                                           Register left_type,
                                           Register right_type,
                                           Register scratch) {
  DCHECK(!AreAliased(left, right, left_type, right_type, scratch));

  if (masm->emit_debug_code()) {
    // We assume that the arguments are not identical.
    __ Cmp(left, right);
    __ Assert(ne, kExpectedNonIdenticalObjects);
  }

  // If either operand is a JS object or an oddball value, then they are not
  // equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label right_non_object;

  __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, &right_non_object);

  // Return non-zero - x0 already contains a non-zero pointer.
  DCHECK(left.is(x0) || right.is(x0));
  Label return_not_equal;
  __ Bind(&return_not_equal);
  __ Ret();

  __ Bind(&right_non_object);

  // Check for oddballs: true, false, null, undefined.
  __ Cmp(right_type, ODDBALL_TYPE);

  // If right is not ODDBALL, test left. Otherwise, set eq condition.
  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);

  // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
  // Otherwise, right or left is ODDBALL, so set a ge condition.
  __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);

  __ B(ge, &return_not_equal);

  // Internalized strings are unique, so they can only be equal if they are the
  // same object. We have already tested that case, so if left and right are
  // both internalized strings, they cannot be equal.
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  __ Orr(scratch, left_type, right_type);
  __ TestAndBranchIfAllClear(
      scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
}


// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register left,
                                    Register right,
                                    FPRegister left_d,
                                    FPRegister right_d,
                                    Label* slow,
                                    bool strict) {
  DCHECK(!AreAliased(left_d, right_d));
  DCHECK((left.is(x0) && right.is(x1)) ||
         (right.is(x0) && left.is(x1)));
  Register result = x0;

  Label right_is_smi, done;
  __ JumpIfSmi(right, &right_is_smi);

  // Left is the smi. Check whether right is a heap number.
  if (strict) {
    // If right is not a number and left is a smi, then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfHeapNumber(right, &is_heap_number);
    // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!right.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotHeapNumber(right, slow);
  }

  // Left is the smi. Right is a heap number. Load right value into right_d, and
  // convert left smi into double in left_d.
  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(left_d, left);
  __ B(&done);

  __ Bind(&right_is_smi);
  // Right is a smi. Check whether the non-smi left is a heap number.
  if (strict) {
    // If left is not a number and right is a smi then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfHeapNumber(left, &is_heap_number);
    // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!left.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotHeapNumber(left, slow);
  }

  // Right is the smi. Left is a heap number. Load left value into left_d, and
  // convert right smi into double in right_d.
  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(right_d, right);

  // Fall through to both_loaded_as_doubles.
  __ Bind(&done);
}


// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(
    MacroAssembler* masm, Register left, Register right, Register left_map,
    Register right_map, Register left_type, Register right_type,
    Label* possible_strings, Label* runtime_call) {
  DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
  Register result = x0;
  DCHECK(left.is(x0) || right.is(x0));

  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // TODO(all): reexamine this branch sequence for optimisation wrt branch
  // prediction.
  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call);
  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in x0.
  __ Ret();

  __ Bind(&object_test);

  Register left_bitfield = left_type;
  Register right_bitfield = right_type;
  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
  __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable);
  __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);

  __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, runtime_call);
  __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, runtime_call);

  __ Bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in x0.
  __ Ret();

  __ Bind(&undetectable);
  __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
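  // (For example, in a browser embedder document.all is an undetectable
  // receiver: document.all == null and document.all == undefined evaluate to
  // true, while an abstract-equality comparison against any other receiver
  // stays false.)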
  __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
  __ B(eq, &return_equal);
  __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
  __ B(ne, &return_unequal);

  __ Bind(&return_equal);
  __ Mov(result, EQUAL);
  __ Ret();
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ JumpIfNotHeapNumber(input, fail);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ Bind(&ok);
}


void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = x1;
  Register rhs = x0;
  Register result = x0;
  Condition cond = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;
  Label not_two_smis, smi_done;
  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
  __ SmiUntag(lhs);
  __ Sub(result, lhs, Operand::UntagSmi(rhs));
  __ Ret();

  __ Bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so it is
  // certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);

  // If either is a smi (we know that at least one is not a smi), then they can
  // only be strictly equal if the other is a HeapNumber.
  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);

  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
  // can:
  //  1) Return the answer.
  //  2) Branch to the slow case.
  //  3) Fall through to both_loaded_as_doubles.
  // In case 3, we have found out that we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded, right into
  // rhs_d, left into lhs_d.
  FPRegister rhs_d = d0;
  FPRegister lhs_d = d1;
  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());

  __ Bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in rhs_d and
  // lhs_d.
  Label nan;
  __ Fcmp(lhs_d, rhs_d);
  __ B(vs, &nan);  // Overflow flag set if either is NaN.
  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
  __ Ret();

  __ Bind(&nan);
  // Left and/or right is a NaN. Load the result register with whatever makes
  // the comparison fail, since comparisons with NaN always fail (except ne,
  // which is filtered out at a higher level.)
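  // (For example, for cond == lt the result is set to GREATER so that a
  // JavaScript comparison like 'NaN < x' ends up false; for cond == ge it is
  // set to LESS so that 'NaN >= x' is false as well.)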
  DCHECK(cond != ne);
  if ((cond == lt) || (cond == le)) {
    __ Mov(result, GREATER);
  } else {
    __ Mov(result, LESS);
  }
  __ Ret();

  __ Bind(&not_smis);
  // At this point we know we are dealing with two different objects, and
  // neither of them is a smi. The objects are in rhs_ and lhs_.

  // Load the maps and types of the objects.
  Register rhs_map = x10;
  Register rhs_type = x11;
  Register lhs_map = x12;
  Register lhs_type = x13;
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));

  if (strict()) {
    // This emits a non-equal return sequence for some object types, or falls
    // through if it was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap number comparison. Branch to earlier double comparison code
  // if they are heap numbers, otherwise, branch to internalized string check.
  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
  __ B(ne, &check_for_internalized_strings);
  __ Cmp(lhs_map, rhs_map);

  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
  // string check.
  __ B(ne, &flat_string_check);

  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
  // comparison code.
  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ B(&both_loaded_as_doubles);

  __ Bind(&check_for_internalized_strings);
  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
  // of internalized strings.
  if ((cond == eq) && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise branches to the string case or not both strings case.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
                                             lhs_type, rhs_type,
                                             &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ Bind(&flat_string_check);
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
                                                    x15, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
                      x11);
  if (cond == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
                                                  x12);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
                                                    x12, x13);
  }

  // Never fall through to here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&slow);

  if (cond == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(x1, Heap::kTrueValueRootIndex);
    __ Sub(x0, x0, x1);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if ((cond == lt) || (cond == le)) {
      ncr = GREATER;
    } else {
      DCHECK((cond == gt) || (cond == ge));  // remaining cases
      ncr = LESS;
    }
    __ Mov(x10, Smi::FromInt(ncr));
    __ Push(x10);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  CPURegList saved_regs = kCallerSaved;
  CPURegList saved_fp_regs = kCallerSavedFP;

  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.

  // We don't care if MacroAssembler scratch registers are corrupted.
  saved_regs.Remove(*(masm->TmpList()));
  saved_fp_regs.Remove(*(masm->FPTmpList()));

  __ PushCPURegList(saved_regs);
  if (save_doubles()) {
    __ PushCPURegList(saved_fp_regs);
  }

  AllowExternalCallThatCantCauseGC scope(masm);
  __ Mov(x0, ExternalReference::isolate_address(isolate()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);

  if (save_doubles()) {
    __ PopCPURegList(saved_fp_regs);
  }
  __ PopCPURegList(saved_regs);
  __ Ret();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
  Register return_address = temps.AcquireX();
  __ Mov(return_address, lr);
  // Restore lr with the value it had before the call to this stub (the value
  // which must be pushed).
  __ Mov(lr, saved_lr);
  __ PushSafepointRegisters();
  __ Ret(return_address);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register return_address = temps.AcquireX();
  // Preserve the return address (lr will be clobbered by the pop).
  __ Mov(return_address, lr);
  __ PopSafepointRegisters();
  __ Ret(return_address);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  // Stack on entry:
  // jssp[0]: Exponent (as a tagged value).
  // jssp[1]: Base (as a tagged value).
  //
  // The (tagged) result will be returned in x0, as a heap number.

  Register result_tagged = x0;
  Register base_tagged = x10;
  Register exponent_tagged = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent_tagged.is(x11));
  Register exponent_integer = MathPowIntegerDescriptor::exponent();
  DCHECK(exponent_integer.is(x12));
  Register scratch1 = x14;
  Register scratch0 = x15;
  Register saved_lr = x19;
  FPRegister result_double = d0;
  FPRegister base_double = d0;
  FPRegister exponent_double = d1;
  FPRegister base_double_copy = d2;
  FPRegister scratch1_double = d6;
  FPRegister scratch0_double = d7;

  // A fast-path for integer exponents.
  Label exponent_is_smi, exponent_is_integer;
  // Bail out to runtime.
  Label call_runtime;
  // Allocate a heap number for the result, and return it.
  Label done;

  // Unpack the inputs.
  if (exponent_type() == ON_STACK) {
    Label base_is_smi;
    Label unpack_exponent;

    __ Pop(exponent_tagged, base_tagged);

    __ JumpIfSmi(base_tagged, &base_is_smi);
    __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
    // base_tagged is a heap number, so load its double value.
    __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
    __ B(&unpack_exponent);
    __ Bind(&base_is_smi);
    // base_tagged is a SMI, so untag it and convert it to a double.
    __ SmiUntagToDouble(base_double, base_tagged);

    __ Bind(&unpack_exponent);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
    //  d0    base_double       The base as a double.
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
    // exponent_tagged is a heap number, so load its double value.
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  }

  // Handle double (heap number) exponents.
  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as doubles and handle those in the
    // integer fast-path.
    __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
                                 scratch0_double, &exponent_is_integer);

    if (exponent_type() == ON_STACK) {
      FPRegister half_double = d3;
      FPRegister minus_half_double = d4;
      // Detect square root case. Crankshaft detects constant +/-0.5 at compile
      // time and uses DoMathPowHalf instead. We then skip this check for
      // non-constant cases of +/-0.5 as these hardly occur.

      __ Fmov(minus_half_double, -0.5);
      __ Fmov(half_double, 0.5);
      __ Fcmp(minus_half_double, exponent_double);
      __ Fccmp(half_double, exponent_double, NZFlag, ne);
      // Condition flags at this point:
      //   0.5:   nZCv  // Identified by eq && pl
      //  -0.5:   NZcv  // Identified by eq && mi
      //  other:  ?z??  // Identified by ne
      __ B(ne, &call_runtime);

      // The exponent is 0.5 or -0.5.

      // Given that exponent is known to be either 0.5 or -0.5, the following
      // special cases could apply (according to ECMA-262 15.8.2.13):
      //
      //  base.isNaN():                   The result is NaN.
      //  (base == +INFINITY) || (base == -INFINITY)
      //    exponent == 0.5:              The result is +INFINITY.
      //    exponent == -0.5:             The result is +0.
      //  (base == +0) || (base == -0)
      //    exponent == 0.5:              The result is +0.
      //    exponent == -0.5:             The result is +INFINITY.
      //  (base < 0) && base.isFinite():  The result is NaN.
      //
      // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
      // where base is -INFINITY or -0.
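      // (Concretely: Fsqrt(-0.0) yields -0.0 and Fsqrt(-INFINITY) yields NaN,
      // whereas the list above requires Math.pow(-0, 0.5) == +0 and
      // Math.pow(-Infinity, 0.5) == +Infinity, hence the fix-ups below.)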
835
836 // Add +0 to base. This has no effect other than turning -0 into +0.
837 __ Fadd(base_double, base_double, fp_zero);
838 // The operation -0+0 results in +0 in all cases except where the
839 // FPCR rounding mode is 'round towards minus infinity' (RM). The
840 // ARM64 simulator does not currently simulate FPCR (where the rounding
841 // mode is set), so test the operation with some debug code.
842 if (masm->emit_debug_code()) {
843 UseScratchRegisterScope temps(masm);
844 Register temp = temps.AcquireX();
845 __ Fneg(scratch0_double, fp_zero);
846 // Verify that we correctly generated +0.0 and -0.0.
847 // bits(+0.0) = 0x0000000000000000
848 // bits(-0.0) = 0x8000000000000000
849 __ Fmov(temp, fp_zero);
850 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
851 __ Fmov(temp, scratch0_double);
852 __ Eor(temp, temp, kDSignMask);
853 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
854 // Check that -0.0 + 0.0 == +0.0.
855 __ Fadd(scratch0_double, scratch0_double, fp_zero);
856 __ Fmov(temp, scratch0_double);
857 __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
858 }
859
860 // If base is -INFINITY, make it +INFINITY.
861 // * Calculate base - base: All infinities will become NaNs since both
862 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
863 // * If the result is NaN, calculate abs(base).
864 __ Fsub(scratch0_double, base_double, base_double);
865 __ Fcmp(scratch0_double, 0.0);
866 __ Fabs(scratch1_double, base_double);
867 __ Fcsel(base_double, scratch1_double, base_double, vs);
868
869 // Calculate the square root of base.
870 __ Fsqrt(result_double, base_double);
871 __ Fcmp(exponent_double, 0.0);
872 __ B(ge, &done); // Finish now for exponents of 0.5.
873 // Find the inverse for exponents of -0.5.
874 __ Fmov(scratch0_double, 1.0);
875 __ Fdiv(result_double, scratch0_double, result_double);
876 __ B(&done);
877 }
878
879 {
880 AllowExternalCallThatCantCauseGC scope(masm);
881 __ Mov(saved_lr, lr);
882 __ CallCFunction(
883 ExternalReference::power_double_double_function(isolate()),
884 0, 2);
885 __ Mov(lr, saved_lr);
886 __ B(&done);
887 }
888
889 // Handle SMI exponents.
890 __ Bind(&exponent_is_smi);
891 // x10 base_tagged The tagged base (input).
892 // x11 exponent_tagged The tagged exponent (input).
893 // d1 base_double The base as a double.
894 __ SmiUntag(exponent_integer, exponent_tagged);
895 }
896
897 __ Bind(&exponent_is_integer);
898 // x10 base_tagged The tagged base (input).
899 // x11 exponent_tagged The tagged exponent (input).
900 // x12 exponent_integer The exponent as an integer.
901 // d1 base_double The base as a double.
902
903 // Find abs(exponent). For negative exponents, we can find the inverse later.
904 Register exponent_abs = x13;
905 __ Cmp(exponent_integer, 0);
906 __ Cneg(exponent_abs, exponent_integer, mi);
907 // x13 exponent_abs The value of abs(exponent_integer).
908
909 // Repeatedly multiply to calculate the power.
910 // result = 1.0;
911 // For each bit n (exponent_integer{n}) {
912 // if (exponent_integer{n}) {
913 // result *= base;
914 // }
915 // base *= base;
916 // if (remaining bits in exponent_integer are all zero) {
917 // break;
918 // }
919 // }
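  // (Illustrative trace, not generated code: for exponent_abs == 6 == 0b110
  // the loop squares base into base^2 and base^4 and multiplies each of them
  // into result, because bits 1 and 2 are set, so result becomes
  // base^2 * base^4 == base^6.)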
  Label power_loop, power_loop_entry, power_loop_exit;
  __ Fmov(scratch1_double, base_double);
  __ Fmov(base_double_copy, base_double);
  __ Fmov(result_double, 1.0);
  __ B(&power_loop_entry);

  __ Bind(&power_loop);
  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
  __ Lsr(exponent_abs, exponent_abs, 1);
  __ Cbz(exponent_abs, &power_loop_exit);

  __ Bind(&power_loop_entry);
  __ Tbz(exponent_abs, 0, &power_loop);
  __ Fmul(result_double, result_double, scratch1_double);
  __ B(&power_loop);

  __ Bind(&power_loop_exit);

  // If the exponent was positive, result_double holds the result.
  __ Tbz(exponent_integer, kXSignBit, &done);

  // The exponent was negative, so find the inverse.
  __ Fmov(scratch0_double, 1.0);
  __ Fdiv(result_double, scratch0_double, result_double);
  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
  // to calculate the subnormal value 2^-1074. This method of calculating
  // negative powers doesn't work because 2^1074 overflows to infinity. To
  // catch this corner-case, we bail out if the result was 0. (This can only
  // occur if the divisor is infinity or the base is zero.)
  __ Fcmp(result_double, 0.0);
  __ B(&done, ne);

  if (exponent_type() == ON_STACK) {
    // Bail out to runtime code.
    __ Bind(&call_runtime);
    // Put the arguments back on the stack.
    __ Push(base_tagged, exponent_tagged);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // Return.
    __ Bind(&done);
    __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
                          result_double);
    DCHECK(result_tagged.is(x0));
    __ Ret();
  } else {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ Mov(saved_lr, lr);
    __ Fmov(base_double, base_double_copy);
    __ Scvtf(exponent_double, exponent_integer);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()),
        0, 2);
    __ Mov(lr, saved_lr);
    __ Bind(&done);
    __ Ret();
  }
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  // It is important that the following stubs are generated in this order
  // because pregenerated stubs can only call other pregenerated stubs.
  // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
  // CEntryStub.
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Floating-point code doesn't get special handling in ARM64, so there's
  // nothing to do here.
  USE(isolate);
}


bool CEntryStub::NeedsImmovableCode() {
  // CEntryStub stores the return address on the stack before calling into
  // C++ code. In some cases, the VM accesses this address, but it is not used
  // when the C++ code returns to the stub because LR holds the return address
  // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
  // returning to dead code.
  // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
  // find any comment to confirm this, and I don't hit any crashes whatever
  // this function returns. The analysis should be properly confirmed.
  return true;
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
  CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
  stub_fp.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // The Abort mechanism relies on CallRuntime, which in turn relies on
  // CEntryStub, so until this stub has been generated, we have to use a
  // fall-back Abort mechanism.
  //
  // Note that this stub must be generated before any use of Abort.
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);

  ASM_LOCATION("CEntryStub::Generate entry");
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Register parameters:
  //    x0: argc (including receiver, untagged)
  //    x1: target
  // If argv_in_register():
  //    x11: argv (pointer to first argument)
  //
  // The stack on entry holds the arguments and the receiver, with the receiver
  // at the highest address:
  //
  //    jssp[argc-1]: receiver
  //    jssp[argc-2]: arg[argc-2]
  //    ...           ...
  //    jssp[1]:      arg[1]
  //    jssp[0]:      arg[0]
  //
  // The arguments are in reverse order, so that arg[argc-2] is actually the
  // first argument to the target function and arg[0] is the last.
1069 DCHECK(jssp.Is(__ StackPointer()));
1070 const Register& argc_input = x0;
1071 const Register& target_input = x1;
1072
1073 // Calculate argv, argc and the target address, and store them in
1074 // callee-saved registers so we can retry the call without having to reload
1075 // these arguments.
1076 // TODO(jbramley): If the first call attempt succeeds in the common case (as
1077 // it should), then we might be better off putting these parameters directly
1078 // into their argument registers, rather than using callee-saved registers and
1079 // preserving them on the stack.
1080 const Register& argv = x21;
1081 const Register& argc = x22;
1082 const Register& target = x23;
1083
1084 // Derive argv from the stack pointer so that it points to the first argument
1085 // (arg[argc-2]), or just below the receiver in case there are no arguments.
1086 // - Adjust for the arg[] array.
1087 Register temp_argv = x11;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001088 if (!argv_in_register()) {
1089 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
1090 // - Adjust for the receiver.
1091 __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
1092 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001093
Ben Murdoch097c5b22016-05-18 11:27:45 +01001094 // Reserve three slots to preserve x21-x23 callee-saved registers. If the
1095 // result size is too large to be returned in registers then also reserve
1096 // space for the return value.
1097 int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
1098 // Enter the exit frame.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001099 FrameScope scope(masm, StackFrame::MANUAL);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001100 __ EnterExitFrame(save_doubles(), x10, extra_stack_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001101 DCHECK(csp.Is(__ StackPointer()));
1102
1103 // Poke callee-saved registers into reserved space.
1104 __ Poke(argv, 1 * kPointerSize);
1105 __ Poke(argc, 2 * kPointerSize);
1106 __ Poke(target, 3 * kPointerSize);
1107
Ben Murdoch097c5b22016-05-18 11:27:45 +01001108 if (result_size() > 2) {
1109 // Save the location of the return value into x8 for call.
1110 __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
1111 }
1112
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001113 // We normally only keep tagged values in callee-saved registers, as they
1114 // could be pushed onto the stack by called stubs and functions, and on the
1115 // stack they can confuse the GC. However, we're only calling C functions
1116 // which can push arbitrary data onto the stack anyway, and so the GC won't
1117 // examine that part of the stack.
1118 __ Mov(argc, argc_input);
1119 __ Mov(target, target_input);
1120 __ Mov(argv, temp_argv);
1121
1122 // x21 : argv
1123 // x22 : argc
1124 // x23 : call target
1125 //
1126 // The stack (on entry) holds the arguments and the receiver, with the
1127 // receiver at the highest address:
1128 //
1129 // argv[8]: receiver
1130 // argv -> argv[0]: arg[argc-2]
1131 // ... ...
1132 // argv[...]: arg[1]
1133 // argv[...]: arg[0]
1134 //
1135 // Immediately below (after) this is the exit frame, as constructed by
1136 // EnterExitFrame:
1137 // fp[8]: CallerPC (lr)
1138 // fp -> fp[0]: CallerFP (old fp)
1139 // fp[-8]: Space reserved for SPOffset.
1140 // fp[-16]: CodeObject()
1141 // csp[...]: Saved doubles, if saved_doubles is true.
1142 // csp[32]: Alignment padding, if necessary.
1143 // csp[24]: Preserved x23 (used for target).
1144 // csp[16]: Preserved x22 (used for argc).
1145 // csp[8]: Preserved x21 (used for argv).
1146 // csp -> csp[0]: Space reserved for the return address.
1147 //
1148 // After a successful call, the exit frame, preserved registers (x21-x23) and
1149 // the arguments (including the receiver) are dropped or popped as
1150 // appropriate. The stub then returns.
1151 //
1152 // After an unsuccessful call, the exit frame and suchlike are left
1153 // untouched, and the stub either throws an exception by jumping to one of
1154 // the exception_returned label.
1155
1156 DCHECK(csp.Is(__ StackPointer()));
1157
1158 // Prepare AAPCS64 arguments to pass to the builtin.
1159 __ Mov(x0, argc);
1160 __ Mov(x1, argv);
1161 __ Mov(x2, ExternalReference::isolate_address(isolate()));
1162
1163 Label return_location;
1164 __ Adr(x12, &return_location);
1165 __ Poke(x12, 0);
1166
1167 if (__ emit_debug_code()) {
1168 // Verify that the slot below fp[kSPOffset]-8 points to the return location
1169 // (currently in x12).
1170 UseScratchRegisterScope temps(masm);
1171 Register temp = temps.AcquireX();
1172 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1173 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1174 __ Cmp(temp, x12);
1175 __ Check(eq, kReturnAddressNotFoundInFrame);
1176 }
1177
1178 // Call the builtin.
1179 __ Blr(target);
1180 __ Bind(&return_location);
1181
Ben Murdoch097c5b22016-05-18 11:27:45 +01001182 if (result_size() > 2) {
1183 DCHECK_EQ(3, result_size());
1184 // Read result values stored on stack.
1185 __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
1186 __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
1187 __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
1188 }
1189 // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
1190
1191 // x0 result0 The return code from the call.
1192 // x1 result1 For calls which return ObjectPair or ObjectTriple.
1193 // x2 result2 For calls which return ObjectTriple.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001194 // x21 argv
1195 // x22 argc
1196 // x23 target
1197 const Register& result = x0;
1198
1199 // Check result for exception sentinel.
1200 Label exception_returned;
1201 __ CompareRoot(result, Heap::kExceptionRootIndex);
1202 __ B(eq, &exception_returned);
1203
1204 // The call succeeded, so unwind the stack and return.
1205
1206 // Restore callee-saved registers x21-x23.
1207 __ Mov(x11, argc);
1208
1209 __ Peek(argv, 1 * kPointerSize);
1210 __ Peek(argc, 2 * kPointerSize);
1211 __ Peek(target, 3 * kPointerSize);
1212
1213 __ LeaveExitFrame(save_doubles(), x10, true);
1214 DCHECK(jssp.Is(__ StackPointer()));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001215 if (!argv_in_register()) {
1216 // Drop the remaining stack slots and return from the stub.
1217 __ Drop(x11);
1218 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001219 __ AssertFPCRState();
1220 __ Ret();
1221
1222 // The stack pointer is still csp if we aren't returning, and the frame
1223 // hasn't changed (except for the return address).
1224 __ SetStackPointer(csp);
1225
1226 // Handling of exception.
1227 __ Bind(&exception_returned);
1228
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001229 ExternalReference pending_handler_context_address(
1230 Isolate::kPendingHandlerContextAddress, isolate());
1231 ExternalReference pending_handler_code_address(
1232 Isolate::kPendingHandlerCodeAddress, isolate());
1233 ExternalReference pending_handler_offset_address(
1234 Isolate::kPendingHandlerOffsetAddress, isolate());
1235 ExternalReference pending_handler_fp_address(
1236 Isolate::kPendingHandlerFPAddress, isolate());
1237 ExternalReference pending_handler_sp_address(
1238 Isolate::kPendingHandlerSPAddress, isolate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001239
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001240 // Ask the runtime for help to determine the handler. This will set x0 to
1241 // contain the current pending exception, don't clobber it.
1242 ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1243 isolate());
1244 DCHECK(csp.Is(masm->StackPointer()));
1245 {
1246 FrameScope scope(masm, StackFrame::MANUAL);
1247 __ Mov(x0, 0); // argc.
1248 __ Mov(x1, 0); // argv.
1249 __ Mov(x2, ExternalReference::isolate_address(isolate()));
1250 __ CallCFunction(find_handler, 3);
1251 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001252
1253 // We didn't execute a return case, so the stack frame hasn't been updated
1254 // (except for the return address slot). However, we don't need to initialize
1255 // jssp because the throw method will immediately overwrite it when it
1256 // unwinds the stack.
1257 __ SetStackPointer(jssp);
1258
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001259 // Retrieve the handler context, SP and FP.
1260 __ Mov(cp, Operand(pending_handler_context_address));
1261 __ Ldr(cp, MemOperand(cp));
1262 __ Mov(jssp, Operand(pending_handler_sp_address));
1263 __ Ldr(jssp, MemOperand(jssp));
1264 __ Mov(fp, Operand(pending_handler_fp_address));
1265 __ Ldr(fp, MemOperand(fp));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001266
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001267 // If the handler is a JS frame, restore the context to the frame. Note that
1268 // the context will be set to (cp == 0) for non-JS frames.
1269 Label not_js_frame;
1270 __ Cbz(cp, &not_js_frame);
1271 __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1272 __ Bind(&not_js_frame);
1273
1274 // Compute the handler entry address and jump to it.
1275 __ Mov(x10, Operand(pending_handler_code_address));
1276 __ Ldr(x10, MemOperand(x10));
1277 __ Mov(x11, Operand(pending_handler_offset_address));
1278 __ Ldr(x11, MemOperand(x11));
1279 __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
1280 __ Add(x10, x10, x11);
1281 __ Br(x10);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001282}
1283
1284
1285// This is the entry point from C++. 5 arguments are provided in x0-x4.
1286// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1287// Input:
1288// x0: code entry.
1289// x1: function.
1290// x2: receiver.
1291// x3: argc.
1292// x4: argv.
1293// Output:
1294// x0: result.
1295void JSEntryStub::Generate(MacroAssembler* masm) {
1296 DCHECK(jssp.Is(__ StackPointer()));
1297 Register code_entry = x0;
1298
1299 // Enable instruction instrumentation. This only works on the simulator, and
1300 // will have no effect on the model or real hardware.
1301 __ EnableInstrumentation();
1302
1303 Label invoke, handler_entry, exit;
1304
1305 // Push callee-saved registers and synchronize the system stack pointer (csp)
1306 // and the JavaScript stack pointer (jssp).
1307 //
1308 // We must not write to jssp until after the PushCalleeSavedRegisters()
1309 // call, since jssp is itself a callee-saved register.
1310 __ SetStackPointer(csp);
1311 __ PushCalleeSavedRegisters();
1312 __ Mov(jssp, csp);
1313 __ SetStackPointer(jssp);
1314
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001315 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1316
1317 // Set up the reserved register for 0.0.
1318 __ Fmov(fp_zero, 0.0);
1319
1320 // Build an entry frame (see layout below).
1321 int marker = type();
1322 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1323 __ Mov(x13, bad_frame_pointer);
1324 __ Mov(x12, Smi::FromInt(marker));
1325 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1326 __ Ldr(x10, MemOperand(x11));
1327
Ben Murdochda12d292016-06-02 14:46:10 +01001328 __ Push(x13, x12, xzr, x10);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001329 // Set up fp.
1330 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1331
1332 // Push the JS entry frame marker. Also set js_entry_sp if this is the
1333 // outermost JS call.
1334 Label non_outermost_js, done;
1335 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1336 __ Mov(x10, ExternalReference(js_entry_sp));
1337 __ Ldr(x11, MemOperand(x10));
1338 __ Cbnz(x11, &non_outermost_js);
1339 __ Str(fp, MemOperand(x10));
1340 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1341 __ Push(x12);
1342 __ B(&done);
1343 __ Bind(&non_outermost_js);
1344 // We spare one instruction by pushing xzr since the marker is 0.
1345 DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1346 __ Push(xzr);
1347 __ Bind(&done);
1348
1349 // The frame set up looks like this:
1350 // jssp[0] : JS entry frame marker.
1351 // jssp[1] : C entry FP.
1352 // jssp[2] : stack frame marker.
1353 // jssp[3] : stack frmae marker.
1354 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1355
1356
1357 // Jump to a faked try block that does the invoke, with a faked catch
1358 // block that sets the pending exception.
1359 __ B(&invoke);
1360
1361 // Prevent the constant pool from being emitted between the record of the
1362 // handler_entry position and the first instruction of the sequence here.
1363 // There is no risk because Assembler::Emit() emits the instruction before
1364 // checking for constant pool emission, but we do not want to depend on
1365 // that.
1366 {
1367 Assembler::BlockPoolsScope block_pools(masm);
1368 __ bind(&handler_entry);
1369 handler_offset_ = handler_entry.pos();
1370 // Caught exception: Store result (exception) in the pending exception
1371 // field in the JSEnv and return a failure sentinel. Coming in here the
1372 // fp will be invalid because the PushTryHandler below sets it to 0 to
1373 // signal the existence of the JSEntry frame.
1374 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1375 isolate())));
1376 }
1377 __ Str(code_entry, MemOperand(x10));
1378 __ LoadRoot(x0, Heap::kExceptionRootIndex);
1379 __ B(&exit);
1380
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001381 // Invoke: Link this frame into the handler chain.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001382 __ Bind(&invoke);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001383 __ PushStackHandler();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001384 // If an exception not caught by another handler occurs, this handler
1385 // returns control to the code after the B(&invoke) above, which
1386 // restores all callee-saved registers (including cp and fp) to their
1387 // saved values before returning a failure to C.
1388
1389 // Clear any pending exceptions.
1390 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1391 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1392 isolate())));
1393 __ Str(x10, MemOperand(x11));
1394
1395 // Invoke the function by calling through the JS entry trampoline builtin.
1396 // Notice that we cannot store a reference to the trampoline code directly in
1397 // this stub, because runtime stubs are not traversed when doing GC.
1398
  // Registers expected by Builtins::JSEntryTrampoline:
1400 // x0: code entry.
1401 // x1: function.
1402 // x2: receiver.
1403 // x3: argc.
1404 // x4: argv.
1405 ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
1406 ? Builtins::kJSConstructEntryTrampoline
1407 : Builtins::kJSEntryTrampoline,
1408 isolate());
1409 __ Mov(x10, entry);
1410
1411 // Call the JSEntryTrampoline.
1412 __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1413 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1414 __ Blr(x12);
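  // Sketch of the address computation above: the entry reference in x10 points
  // at the isolate's builtins table slot, so the Ldr yields a tagged pointer to
  // the trampoline's Code object. Its first instruction sits Code::kHeaderSize
  // bytes into the object, hence adding kHeaderSize and cancelling the
  // kHeapObjectTag bias before branching.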
1415
1416 // Unlink this frame from the handler chain.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001417 __ PopStackHandler();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001418
1419
1420 __ Bind(&exit);
1421 // x0 holds the result.
1422 // The stack pointer points to the top of the entry frame pushed on entry from
1423 // C++ (at the beginning of this stub):
1424 // jssp[0] : JS entry frame marker.
1425 // jssp[1] : C entry FP.
  // jssp[2] : stack frame marker (0).
  // jssp[3] : stack frame marker (type).
1428 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1429
1430 // Check if the current stack frame is marked as the outermost JS frame.
1431 Label non_outermost_js_2;
1432 __ Pop(x10);
1433 __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1434 __ B(ne, &non_outermost_js_2);
1435 __ Mov(x11, ExternalReference(js_entry_sp));
1436 __ Str(xzr, MemOperand(x11));
1437 __ Bind(&non_outermost_js_2);
1438
1439 // Restore the top frame descriptors from the stack.
1440 __ Pop(x10);
1441 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1442 __ Str(x10, MemOperand(x11));
1443
1444 // Reset the stack to the callee saved registers.
1445 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1446 // Restore the callee-saved registers and return.
1447 DCHECK(jssp.Is(__ StackPointer()));
1448 __ Mov(csp, jssp);
1449 __ SetStackPointer(csp);
1450 __ PopCalleeSavedRegisters();
1451 // After this point, we must not modify jssp because it is a callee-saved
1452 // register which we have just restored.
1453 __ Ret();
1454}
1455
1456
1457void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1458 Label miss;
1459 Register receiver = LoadDescriptor::ReceiverRegister();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001460 // Ensure that the vector and slot registers won't be clobbered before
1461 // calling the miss handler.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001462 DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
1463 LoadWithVectorDescriptor::SlotRegister()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001464
1465 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
1466 x11, &miss);
1467
1468 __ Bind(&miss);
1469 PropertyAccessCompiler::TailCallBuiltin(
1470 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1471}
1472
1473
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001474void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1475 // Return address is in lr.
1476 Label miss;
1477
1478 Register receiver = LoadDescriptor::ReceiverRegister();
1479 Register index = LoadDescriptor::NameRegister();
1480 Register result = x0;
1481 Register scratch = x10;
1482 DCHECK(!scratch.is(receiver) && !scratch.is(index));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001483 DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
1484 result.is(LoadWithVectorDescriptor::SlotRegister()));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001485
1486 // StringCharAtGenerator doesn't use the result register until it's passed
1487 // the different miss possibilities. If it did, we would have a conflict
1488 // when FLAG_vector_ics is true.
1489 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1490 &miss, // When not a string.
1491 &miss, // When not a number.
1492 &miss, // When index out of range.
1493 STRING_INDEX_IS_ARRAY_INDEX,
1494 RECEIVER_IS_STRING);
1495 char_at_generator.GenerateFast(masm);
1496 __ Ret();
1497
1498 StubRuntimeCallHelper call_helper;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001499 char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001500
1501 __ Bind(&miss);
1502 PropertyAccessCompiler::TailCallBuiltin(
1503 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1504}
1505
1506
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001507void RegExpExecStub::Generate(MacroAssembler* masm) {
1508#ifdef V8_INTERPRETED_REGEXP
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001509 __ TailCallRuntime(Runtime::kRegExpExec);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001510#else // V8_INTERPRETED_REGEXP
1511
1512 // Stack frame on entry.
1513 // jssp[0]: last_match_info (expected JSArray)
1514 // jssp[8]: previous index
1515 // jssp[16]: subject string
1516 // jssp[24]: JSRegExp object
1517 Label runtime;
1518
1519 // Use of registers for this function.
1520
1521 // Variable registers:
1522 // x10-x13 used as scratch registers
1523 // w0 string_type type of subject string
1524 // x2 jsstring_length subject string length
1525 // x3 jsregexp_object JSRegExp object
1526 // w4 string_encoding Latin1 or UC16
1527 // w5 sliced_string_offset if the string is a SlicedString
1528 // offset to the underlying string
1529 // w6 string_representation groups attributes of the string:
1530 // - is a string
1531 // - type of the string
1532 // - is a short external string
1533 Register string_type = w0;
1534 Register jsstring_length = x2;
1535 Register jsregexp_object = x3;
1536 Register string_encoding = w4;
1537 Register sliced_string_offset = w5;
1538 Register string_representation = w6;
1539
1540 // These are in callee save registers and will be preserved by the call
1541 // to the native RegExp code, as this code is called using the normal
1542 // C calling convention. When calling directly from generated code the
  // native RegExp code will not do a GC and therefore the contents of
1544 // these registers are safe to use after the call.
1545
1546 // x19 subject subject string
1547 // x20 regexp_data RegExp data (FixedArray)
1548 // x21 last_match_info_elements info relative to the last match
1549 // (FixedArray)
1550 // x22 code_object generated regexp code
1551 Register subject = x19;
1552 Register regexp_data = x20;
1553 Register last_match_info_elements = x21;
1554 Register code_object = x22;
1555
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001556 // Stack frame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001557 // jssp[00]: last_match_info (JSArray)
1558 // jssp[08]: previous index
1559 // jssp[16]: subject string
1560 // jssp[24]: JSRegExp object
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001561
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001562 const int kLastMatchInfoOffset = 0 * kPointerSize;
1563 const int kPreviousIndexOffset = 1 * kPointerSize;
1564 const int kSubjectOffset = 2 * kPointerSize;
1565 const int kJSRegExpOffset = 3 * kPointerSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001566
1567 // Ensure that a RegExp stack is allocated.
1568 ExternalReference address_of_regexp_stack_memory_address =
1569 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1570 ExternalReference address_of_regexp_stack_memory_size =
1571 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1572 __ Mov(x10, address_of_regexp_stack_memory_size);
1573 __ Ldr(x10, MemOperand(x10));
1574 __ Cbz(x10, &runtime);
1575
1576 // Check that the first argument is a JSRegExp object.
1577 DCHECK(jssp.Is(__ StackPointer()));
1578 __ Peek(jsregexp_object, kJSRegExpOffset);
1579 __ JumpIfSmi(jsregexp_object, &runtime);
1580 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
1581
1582 // Check that the RegExp has been compiled (data contains a fixed array).
1583 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
1584 if (FLAG_debug_code) {
1585 STATIC_ASSERT(kSmiTag == 0);
1586 __ Tst(regexp_data, kSmiTagMask);
1587 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1588 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
1589 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1590 }
1591
1592 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1593 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1594 __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
1595 __ B(ne, &runtime);
1596
  // Check that the number of captures fits in the static offsets vector
  // buffer. We always have at least one capture for the whole match, plus
  // additional ones due to capturing parentheses. A capture takes 2 registers.
1600 // The number of capture registers then is (number_of_captures + 1) * 2.
1601 __ Ldrsw(x10,
1602 UntagSmiFieldMemOperand(regexp_data,
1603 JSRegExp::kIrregexpCaptureCountOffset));
1604 // Check (number_of_captures + 1) * 2 <= offsets vector size
1605 // number_of_captures * 2 <= offsets vector size - 2
1606 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1607 __ Add(x10, x10, x10);
1608 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
1609 __ B(hi, &runtime);
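  // Worked example (illustrative only): a regexp like /(a)(b)/ has a capture
  // count of 2, so a match needs (2 + 1) * 2 == 6 offset registers: the start
  // and end of the whole match plus the start and end of each capture. The
  // check above tests 2 * 2 == 4 <= kJSRegexpStaticOffsetsVectorSize - 2,
  // which is the same condition as 6 <= kJSRegexpStaticOffsetsVectorSize.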
1610
1611 // Initialize offset for possibly sliced string.
1612 __ Mov(sliced_string_offset, 0);
1613
1614 DCHECK(jssp.Is(__ StackPointer()));
1615 __ Peek(subject, kSubjectOffset);
1616 __ JumpIfSmi(subject, &runtime);
1617
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001618 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
1619
1620 // Handle subject string according to its encoding and representation:
Ben Murdoch097c5b22016-05-18 11:27:45 +01001621 // (1) Sequential string? If yes, go to (4).
1622 // (2) Sequential or cons? If not, go to (5).
1623 // (3) Cons string. If the string is flat, replace subject with first string
1624 // and go to (1). Otherwise bail out to runtime.
1625 // (4) Sequential string. Load regexp code according to encoding.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001626 // (E) Carry on.
1627 /// [...]
1628
1629 // Deferred code at the end of the stub:
Ben Murdoch097c5b22016-05-18 11:27:45 +01001630 // (5) Long external string? If not, go to (7).
1631 // (6) External string. Make it, offset-wise, look like a sequential string.
1632 // Go to (4).
1633 // (7) Short external string or not a string? If yes, bail out to runtime.
1634 // (8) Sliced string. Replace subject with parent. Go to (1).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001635
Ben Murdoch097c5b22016-05-18 11:27:45 +01001636 Label check_underlying; // (1)
1637 Label seq_string; // (4)
1638 Label not_seq_nor_cons; // (5)
1639 Label external_string; // (6)
1640 Label not_long_external; // (7)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001641
Ben Murdoch097c5b22016-05-18 11:27:45 +01001642 __ Bind(&check_underlying);
1643 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
1644 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1645
1646 // (1) Sequential string? If yes, go to (4).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001647 __ And(string_representation,
1648 string_type,
1649 kIsNotStringMask |
1650 kStringRepresentationMask |
1651 kShortExternalStringMask);
1652 // We depend on the fact that Strings of type
1653 // SeqString and not ShortExternalString are defined
1654 // by the following pattern:
1655 // string_type: 0XX0 XX00
1656 // ^ ^ ^^
1657 // | | ||
1658 // | | is a SeqString
1659 // | is not a short external String
1660 // is a String
1661 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1662 STATIC_ASSERT(kShortExternalStringTag != 0);
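  // For example (a sketch of the check below): a SeqOneByteString leaves every
  // tested bit clear, so string_representation becomes 0 and the Cbz branches
  // straight to the sequential-string code at (4). Cons, sliced and external
  // strings keep at least one representation bit set and fall through to the
  // checks that follow.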
Ben Murdoch097c5b22016-05-18 11:27:45 +01001663 __ Cbz(string_representation, &seq_string); // Go to (4).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001664
Ben Murdoch097c5b22016-05-18 11:27:45 +01001665 // (2) Sequential or cons? If not, go to (5).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001666 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1667 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1668 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1669 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1670 __ Cmp(string_representation, kExternalStringTag);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001671 __ B(ge, &not_seq_nor_cons); // Go to (5).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001672
1673 // (3) Cons string. Check that it's flat.
1674 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
1675 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
1676 // Replace subject with first string.
1677 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001678 __ B(&check_underlying);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001679
Ben Murdoch097c5b22016-05-18 11:27:45 +01001680 // (4) Sequential string. Load regexp code according to encoding.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001681 __ Bind(&seq_string);
1682
1683 // Check that the third argument is a positive smi less than the subject
1684 // string length. A negative value will be greater (unsigned comparison).
1685 DCHECK(jssp.Is(__ StackPointer()));
1686 __ Peek(x10, kPreviousIndexOffset);
1687 __ JumpIfNotSmi(x10, &runtime);
1688 __ Cmp(jsstring_length, x10);
1689 __ B(ls, &runtime);
1690
1691 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
1692 // before entering the exit frame.
1693 __ SmiUntag(x1, x10);
1694
1695 // The third bit determines the string encoding in string_type.
1696 STATIC_ASSERT(kOneByteStringTag == 0x04);
1697 STATIC_ASSERT(kTwoByteStringTag == 0x00);
1698 STATIC_ASSERT(kStringEncodingMask == 0x04);
1699
1700 // Find the code object based on the assumptions above.
  // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an
  // offset of kPointerSize to reach the latter.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001703 STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
1704 JSRegExp::kDataUC16CodeOffset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001705 __ Mov(x10, kPointerSize);
1706 // We will need the encoding later: Latin1 = 0x04
1707 // UC16 = 0x00
1708 __ Ands(string_encoding, string_type, kStringEncodingMask);
1709 __ CzeroX(x10, ne);
1710 __ Add(x10, regexp_data, x10);
1711 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
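  // How the selection above works (sketch): Ands sets the flags from
  // string_type & kStringEncodingMask. For Latin1 the result is non-zero, so
  // CzeroX(x10, ne) clears x10 and the Ldr reads kDataOneByteCodeOffset. For
  // UC16 the result is zero, x10 keeps kPointerSize, and the same Ldr
  // effectively reads the adjacent kDataUC16CodeOffset.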
1712
1713 // (E) Carry on. String handling is done.
1714
1715 // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
1718 __ JumpIfSmi(code_object, &runtime);
1719
1720 // All checks done. Now push arguments for native regexp code.
1721 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
1722 x10,
1723 x11);
1724
1725 // Isolates: note we add an additional parameter here (isolate pointer).
1726 __ EnterExitFrame(false, x10, 1);
1727 DCHECK(csp.Is(__ StackPointer()));
1728
1729 // We have 9 arguments to pass to the regexp code, therefore we have to pass
1730 // one on the stack and the rest as registers.
1731
1732 // Note that the placement of the argument on the stack isn't standard
1733 // AAPCS64:
1734 // csp[0]: Space for the return address placed by DirectCEntryStub.
1735 // csp[8]: Argument 9, the current isolate address.
1736
1737 __ Mov(x10, ExternalReference::isolate_address(isolate()));
1738 __ Poke(x10, kPointerSize);
1739
1740 Register length = w11;
1741 Register previous_index_in_bytes = w12;
1742 Register start = x13;
1743
1744 // Load start of the subject string.
1745 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
1746 // Load the length from the original subject string from the previous stack
1747 // frame. Therefore we have to use fp, which points exactly to two pointer
1748 // sizes below the previous sp. (Because creating a new stack frame pushes
1749 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
1750 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1751 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
1752
1753 // Handle UC16 encoding, two bytes make one character.
1754 // string_encoding: if Latin1: 0x04
1755 // if UC16: 0x00
1756 STATIC_ASSERT(kStringEncodingMask == 0x04);
1757 __ Ubfx(string_encoding, string_encoding, 2, 1);
1758 __ Eor(string_encoding, string_encoding, 1);
1759 // string_encoding: if Latin1: 0
1760 // if UC16: 1
1761
1762 // Convert string positions from characters to bytes.
1763 // Previous index is in x1.
1764 __ Lsl(previous_index_in_bytes, w1, string_encoding);
1765 __ Lsl(length, length, string_encoding);
1766 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
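  // Example of the conversion above (illustrative only): Ubfx extracts the
  // encoding bit and Eor inverts it, so string_encoding ends up 0 for Latin1
  // and 1 for UC16. Shifting left by that amount scales characters to bytes:
  // a previous index of 3 stays 3 bytes for Latin1 but becomes 6 for UC16.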
1767
1768 // Argument 1 (x0): Subject string.
1769 __ Mov(x0, subject);
1770
1771 // Argument 2 (x1): Previous index, already there.
1772
1773 // Argument 3 (x2): Get the start of input.
  // Start of input = start of string + previous index + substring offset
  // (0 if the string is not sliced).
1777 __ Add(w10, previous_index_in_bytes, sliced_string_offset);
1778 __ Add(x2, start, Operand(w10, UXTW));
1779
1780 // Argument 4 (x3):
1781 // End of input = start of input + (length of input - previous index)
1782 __ Sub(w10, length, previous_index_in_bytes);
1783 __ Add(x3, x2, Operand(w10, UXTW));
1784
1785 // Argument 5 (x4): static offsets vector buffer.
1786 __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
1787
1788 // Argument 6 (x5): Set the number of capture registers to zero to force
1789 // global regexps to behave as non-global. This stub is not used for global
1790 // regexps.
1791 __ Mov(x5, 0);
1792
1793 // Argument 7 (x6): Start (high end) of backtracking stack memory area.
1794 __ Mov(x10, address_of_regexp_stack_memory_address);
1795 __ Ldr(x10, MemOperand(x10));
1796 __ Mov(x11, address_of_regexp_stack_memory_size);
1797 __ Ldr(x11, MemOperand(x11));
1798 __ Add(x6, x10, x11);
1799
1800 // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
1801 __ Mov(x7, 1);
1802
1803 // Locate the code entry and call it.
1804 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
1805 DirectCEntryStub stub(isolate());
1806 stub.GenerateCall(masm, code_object);
1807
1808 __ LeaveExitFrame(false, x10, true);
1809
1810 // The generated regexp code returns an int32 in w0.
1811 Label failure, exception;
1812 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
1813 __ CompareAndBranch(w0,
1814 NativeRegExpMacroAssembler::EXCEPTION,
1815 eq,
1816 &exception);
1817 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
1818
1819 // Success: process the result from the native regexp code.
1820 Register number_of_capture_registers = x12;
1821
1822 // Calculate number of capture registers (number_of_captures + 1) * 2
1823 // and store it in the last match info.
1824 __ Ldrsw(x10,
1825 UntagSmiFieldMemOperand(regexp_data,
1826 JSRegExp::kIrregexpCaptureCountOffset));
1827 __ Add(x10, x10, x10);
1828 __ Add(number_of_capture_registers, x10, 2);
1829
  // Check that the fourth argument is a JSArray object.
1831 DCHECK(jssp.Is(__ StackPointer()));
1832 __ Peek(x10, kLastMatchInfoOffset);
1833 __ JumpIfSmi(x10, &runtime);
1834 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
1835
1836 // Check that the JSArray is the fast case.
1837 __ Ldr(last_match_info_elements,
1838 FieldMemOperand(x10, JSArray::kElementsOffset));
1839 __ Ldr(x10,
1840 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1841 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
1842
1843 // Check that the last match info has space for the capture registers and the
1844 // additional information (overhead).
1845 // (number_of_captures + 1) * 2 + overhead <= last match info size
1846 // (number_of_captures * 2) + 2 + overhead <= last match info size
1847 // number_of_capture_registers + overhead <= last match info size
1848 __ Ldrsw(x10,
1849 UntagSmiFieldMemOperand(last_match_info_elements,
1850 FixedArray::kLengthOffset));
1851 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
1852 __ Cmp(x11, x10);
1853 __ B(gt, &runtime);
1854
1855 // Store the capture count.
1856 __ SmiTag(x10, number_of_capture_registers);
1857 __ Str(x10,
1858 FieldMemOperand(last_match_info_elements,
1859 RegExpImpl::kLastCaptureCountOffset));
1860 // Store last subject and last input.
1861 __ Str(subject,
1862 FieldMemOperand(last_match_info_elements,
1863 RegExpImpl::kLastSubjectOffset));
1864 // Use x10 as the subject string in order to only need
1865 // one RecordWriteStub.
1866 __ Mov(x10, subject);
1867 __ RecordWriteField(last_match_info_elements,
1868 RegExpImpl::kLastSubjectOffset,
1869 x10,
1870 x11,
1871 kLRHasNotBeenSaved,
1872 kDontSaveFPRegs);
1873 __ Str(subject,
1874 FieldMemOperand(last_match_info_elements,
1875 RegExpImpl::kLastInputOffset));
1876 __ Mov(x10, subject);
1877 __ RecordWriteField(last_match_info_elements,
1878 RegExpImpl::kLastInputOffset,
1879 x10,
1880 x11,
1881 kLRHasNotBeenSaved,
1882 kDontSaveFPRegs);
1883
1884 Register last_match_offsets = x13;
1885 Register offsets_vector_index = x14;
1886 Register current_offset = x15;
1887
1888 // Get the static offsets vector filled by the native regexp code
1889 // and fill the last match info.
1890 ExternalReference address_of_static_offsets_vector =
1891 ExternalReference::address_of_static_offsets_vector(isolate());
1892 __ Mov(offsets_vector_index, address_of_static_offsets_vector);
1893
1894 Label next_capture, done;
1895 // Capture register counter starts from number of capture registers and
1896 // iterates down to zero (inclusive).
1897 __ Add(last_match_offsets,
1898 last_match_info_elements,
1899 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
1900 __ Bind(&next_capture);
1901 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
1902 __ B(mi, &done);
  // Read two 32-bit values from the static offsets vector buffer into
  // an X register.
1905 __ Ldr(current_offset,
1906 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
1907 // Store the smi values in the last match info.
1908 __ SmiTag(x10, current_offset);
1909 // Clearing the 32 bottom bits gives us a Smi.
1910 STATIC_ASSERT(kSmiTag == 0);
1911 __ Bic(x11, current_offset, kSmiShiftMask);
1912 __ Stp(x10,
1913 x11,
1914 MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
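  // Sketch of the tagging trick above: on arm64 a Smi keeps its payload in the
  // upper 32 bits of the word. The Ldr packed two consecutive 32-bit offsets
  // into one X register, so SmiTag (a left shift by 32) tags the low offset,
  // while Bic just clears the low 32 bits, leaving the high offset already in
  // Smi form.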
1915 __ B(&next_capture);
1916 __ Bind(&done);
1917
1918 // Return last match info.
1919 __ Peek(x0, kLastMatchInfoOffset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001920 // Drop the 4 arguments of the stub from the stack.
1921 __ Drop(4);
1922 __ Ret();
1923
1924 __ Bind(&exception);
1925 Register exception_value = x0;
  // A stack overflow (on the backtrack stack) may have occurred
1927 // in the RegExp code but no exception has been created yet.
1928 // If there is no pending exception, handle that in the runtime system.
1929 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1930 __ Mov(x11,
1931 Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1932 isolate())));
1933 __ Ldr(exception_value, MemOperand(x11));
1934 __ Cmp(x10, exception_value);
1935 __ B(eq, &runtime);
1936
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001937 // For exception, throw the exception again.
1938 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001939
1940 __ Bind(&failure);
1941 __ Mov(x0, Operand(isolate()->factory()->null_value()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001942 // Drop the 4 arguments of the stub from the stack.
1943 __ Drop(4);
1944 __ Ret();
1945
1946 __ Bind(&runtime);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001947 __ TailCallRuntime(Runtime::kRegExpExec);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001948
1949 // Deferred code for string handling.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001950 // (5) Long external string? If not, go to (7).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001951 __ Bind(&not_seq_nor_cons);
1952 // Compare flags are still set.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001953 __ B(ne, &not_long_external); // Go to (7).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001954
Ben Murdoch097c5b22016-05-18 11:27:45 +01001955 // (6) External string. Make it, offset-wise, look like a sequential string.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001956 __ Bind(&external_string);
1957 if (masm->emit_debug_code()) {
1958 // Assert that we do not have a cons or slice (indirect strings) here.
1959 // Sequential strings have already been ruled out.
1960 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
1961 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1962 __ Tst(x10, kIsIndirectStringMask);
1963 __ Check(eq, kExternalStringExpectedButNotFound);
1964 __ And(x10, x10, kStringRepresentationMask);
1965 __ Cmp(x10, 0);
1966 __ Check(ne, kExternalStringExpectedButNotFound);
1967 }
1968 __ Ldr(subject,
1969 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1970 // Move the pointer so that offset-wise, it looks like a sequential string.
1971 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1972 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001973 __ B(&seq_string); // Go to (4).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001974
Ben Murdoch097c5b22016-05-18 11:27:45 +01001975 // (7) If this is a short external string or not a string, bail out to
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001976 // runtime.
1977 __ Bind(&not_long_external);
1978 STATIC_ASSERT(kShortExternalStringTag != 0);
1979 __ TestAndBranchIfAnySet(string_representation,
1980 kShortExternalStringMask | kIsNotStringMask,
1981 &runtime);
1982
Ben Murdoch097c5b22016-05-18 11:27:45 +01001983 // (8) Sliced string. Replace subject with parent.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001984 __ Ldr(sliced_string_offset,
1985 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
1986 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001987 __ B(&check_underlying); // Go to (1).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001988#endif
1989}
1990
1991
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001992static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
1993 Register argc, Register function,
1994 Register feedback_vector, Register index,
1995 Register new_target) {
1996 FrameScope scope(masm, StackFrame::INTERNAL);
1997
1998 // Number-of-arguments register must be smi-tagged to call out.
1999 __ SmiTag(argc);
2000 __ Push(argc, function, feedback_vector, index);
2001
2002 DCHECK(feedback_vector.Is(x2) && index.Is(x3));
2003 __ CallStub(stub);
2004
2005 __ Pop(index, feedback_vector, function, argc);
2006 __ SmiUntag(argc);
2007}
2008
2009
2010static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002011 Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002012 Register feedback_vector, Register index,
2013 Register new_target, Register scratch1,
2014 Register scratch2, Register scratch3) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002015 ASM_LOCATION("GenerateRecordCallTarget");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002016 DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
2017 feedback_vector, index, new_target));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002018 // Cache the called function in a feedback vector slot. Cache states are
2019 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
2020 // argc : number of arguments to the construct function
2021 // function : the function to call
2022 // feedback_vector : the feedback vector
2023 // index : slot in feedback vector (smi)
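  // Rough summary of the feedback slot transitions handled below (an
  // interpretive sketch, not from the original comments):
  //   uninitialized        -> WeakCell(function)   plain constructor call
  //   uninitialized        -> AllocationSite       function is the Array() builtin
  //   cleared WeakCell     -> re-initialized as above
  //   monomorphic mismatch -> megamorphic sentinel (and it stays there)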
2024 Label initialize, done, miss, megamorphic, not_array_function;
2025
2026 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2027 masm->isolate()->heap()->megamorphic_symbol());
2028 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2029 masm->isolate()->heap()->uninitialized_symbol());
2030
2031 // Load the cache state.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002032 Register feedback = scratch1;
2033 Register feedback_map = scratch2;
2034 Register feedback_value = scratch3;
2035 __ Add(feedback, feedback_vector,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002036 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002037 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
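  // Addressing sketch: index is a Smi, so UntagSmiAndScale removes the tag and
  // multiplies by kPointerSize in a single operand. For slot 3 on a 64-bit
  // target that adds 3 * 8 bytes, and the FieldMemOperand then skips the
  // FixedArray header while compensating for the heap object tag.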
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002038
2039 // A monomorphic cache hit or an already megamorphic state: invoke the
2040 // function without changing the state.
  // We don't know if the feedback value is a WeakCell or a Symbol, but it's
2042 // harmless to read at this position in a symbol (see static asserts in
2043 // type-feedback-vector.h).
2044 Label check_allocation_site;
2045 __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
2046 __ Cmp(function, feedback_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002047 __ B(eq, &done);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002048 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
2049 __ B(eq, &done);
2050 __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
2051 __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
2052 __ B(ne, &check_allocation_site);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002053
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002054 // If the weak cell is cleared, we have a new chance to become monomorphic.
2055 __ JumpIfSmi(feedback_value, &initialize);
2056 __ B(&megamorphic);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002057
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002058 __ bind(&check_allocation_site);
2059 // If we came here, we need to see if we are the array function.
2060 // If we didn't have a matching function, and we didn't find the megamorph
2061 // sentinel, then we have in the slot either some other function or an
2062 // AllocationSite.
2063 __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
2064
2065 // Make sure the function is the Array() function
2066 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
2067 __ Cmp(function, scratch1);
2068 __ B(ne, &megamorphic);
2069 __ B(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002070
2071 __ Bind(&miss);
2072
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2074 // megamorphic.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002075 __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002076 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2077 // write-barrier is needed.
2078 __ Bind(&megamorphic);
2079 __ Add(scratch1, feedback_vector,
2080 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002081 __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002082 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2083 __ B(&done);
2084
2085 // An uninitialized cache is patched with the function or sentinel to
2086 // indicate the ElementsKind if function is the Array constructor.
2087 __ Bind(&initialize);
2088
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002089 // Make sure the function is the Array() function
2090 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
2091 __ Cmp(function, scratch1);
2092 __ B(ne, &not_array_function);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002093
  // The target function is the Array constructor. Create an AllocationSite if
  // we don't already have it, and store it in the slot.
2097 CreateAllocationSiteStub create_stub(masm->isolate());
2098 CallStubInRecordCallTarget(masm, &create_stub, argc, function,
2099 feedback_vector, index, new_target);
2100 __ B(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002101
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002102 __ Bind(&not_array_function);
2103 CreateWeakCellStub weak_cell_stub(masm->isolate());
2104 CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
2105 feedback_vector, index, new_target);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002106 __ Bind(&done);
2107}
2108
2109
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002110void CallConstructStub::Generate(MacroAssembler* masm) {
2111 ASM_LOCATION("CallConstructStub::Generate");
2112 // x0 : number of arguments
2113 // x1 : the function to call
2114 // x2 : feedback vector
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002115 // x3 : slot in feedback vector (Smi, for RecordCallTarget)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002116 Register function = x1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002117
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002118 Label non_function;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002119 // Check that the function is not a smi.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002120 __ JumpIfSmi(function, &non_function);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002121 // Check that the function is a JSFunction.
2122 Register object_type = x10;
2123 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002124 &non_function);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002126 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002127
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002128 __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
2129 Label feedback_register_initialized;
2130 // Put the AllocationSite from the feedback vector into x2, or undefined.
2131 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
2132 __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
2133 __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
2134 &feedback_register_initialized);
2135 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
2136 __ bind(&feedback_register_initialized);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002137
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002138 __ AssertUndefinedOrAllocationSite(x2, x5);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002139
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002140 __ Mov(x3, function);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002141
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002142 // Tail call to the function-specific construct stub (still in the caller
2143 // context at this point).
2144 __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2145 __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
2146 __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
2147 __ Br(x4);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002148
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002149 __ Bind(&non_function);
2150 __ Mov(x3, function);
2151 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002152}
2153
2154
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002155void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002156 // x1 - function
2157 // x3 - slot id
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002158 // x2 - vector
2159 // x4 - allocation site (loaded from vector[slot])
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002160 Register function = x1;
2161 Register feedback_vector = x2;
2162 Register index = x3;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002163 Register allocation_site = x4;
2164 Register scratch = x5;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002165
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002166 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002167 __ Cmp(function, scratch);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002168 __ B(ne, miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002169
2170 __ Mov(x0, Operand(arg_count()));
2171
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002172 // Increment the call count for monomorphic function calls.
2173 __ Add(feedback_vector, feedback_vector,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002174 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002175 __ Add(feedback_vector, feedback_vector,
2176 Operand(FixedArray::kHeaderSize + kPointerSize));
2177 __ Ldr(index, FieldMemOperand(feedback_vector, 0));
2178 __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2179 __ Str(index, FieldMemOperand(feedback_vector, 0));
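  // Layout note (sketch): the call count lives in the feedback vector entry
  // directly after the slot's AllocationSite/WeakCell, hence the extra
  // kPointerSize in the address above. The count is stored as a Smi, so adding
  // the Smi-encoded increment needs no tagging or untagging.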
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002180
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002181 // Set up arguments for the array constructor stub.
2182 Register allocation_site_arg = feedback_vector;
2183 Register new_target_arg = index;
2184 __ Mov(allocation_site_arg, allocation_site);
2185 __ Mov(new_target_arg, function);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002186 ArrayConstructorStub stub(masm->isolate(), arg_count());
2187 __ TailCallStub(&stub);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002188}
2189
2190
2191void CallICStub::Generate(MacroAssembler* masm) {
2192 ASM_LOCATION("CallICStub");
2193
2194 // x1 - function
2195 // x3 - slot id (Smi)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002196 // x2 - vector
2197 Label extra_checks_or_miss, call, call_function;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002198 int argc = arg_count();
2199 ParameterCount actual(argc);
2200
2201 Register function = x1;
2202 Register feedback_vector = x2;
2203 Register index = x3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002204
2205 // The checks. First, does x1 match the recorded monomorphic target?
2206 __ Add(x4, feedback_vector,
2207 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2208 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
2209
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002210 // We don't know that we have a weak cell. We might have a private symbol
2211 // or an AllocationSite, but the memory is safe to examine.
2212 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2213 // FixedArray.
2214 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2215 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2216 // computed, meaning that it can't appear to be a pointer. If the low bit is
2217 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2218 // to be a pointer.
2219 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2220 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2221 WeakCell::kValueOffset &&
2222 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2223
2224 __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
2225 __ Cmp(x5, function);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002226 __ B(ne, &extra_checks_or_miss);
2227
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002228 // The compare above could have been a SMI/SMI comparison. Guard against this
2229 // convincing us that we have a monomorphic JSFunction.
2230 __ JumpIfSmi(function, &extra_checks_or_miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002231
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002232 // Increment the call count for monomorphic function calls.
2233 __ Add(feedback_vector, feedback_vector,
2234 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2235 __ Add(feedback_vector, feedback_vector,
2236 Operand(FixedArray::kHeaderSize + kPointerSize));
2237 __ Ldr(index, FieldMemOperand(feedback_vector, 0));
2238 __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2239 __ Str(index, FieldMemOperand(feedback_vector, 0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002240
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002241 __ Bind(&call_function);
2242 __ Mov(x0, argc);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002243 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
2244 tail_call_mode()),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002245 RelocInfo::CODE_TARGET);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002246
2247 __ bind(&extra_checks_or_miss);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002248 Label uninitialized, miss, not_allocation_site;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002249
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002250 __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
2251
2252 __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
2253 __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
2254
2255 HandleArrayCase(masm, &miss);
2256
2257 __ bind(&not_allocation_site);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002258
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002259 // The following cases attempt to handle MISS cases without going to the
2260 // runtime.
2261 if (FLAG_trace_ic) {
2262 __ jmp(&miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002263 }
2264
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002265 __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
2266
2267 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2268 // to handle it here. More complex cases are dealt with in the runtime.
2269 __ AssertNotSmi(x4);
2270 __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
2271 __ Add(x4, feedback_vector,
2272 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2273 __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
2274 __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002275
2276 __ Bind(&call);
2277 __ Mov(x0, argc);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002278 __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002279 RelocInfo::CODE_TARGET);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002280
2281 __ bind(&uninitialized);
2282
2283 // We are going monomorphic, provided we actually have a JSFunction.
2284 __ JumpIfSmi(function, &miss);
2285
2286 // Goto miss case if we do not have a function.
2287 __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
2288
2289 // Make sure the function is not the Array() function, which requires special
2290 // behavior on MISS.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002291 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002292 __ Cmp(function, x5);
2293 __ B(eq, &miss);
2294
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002295 // Make sure the function belongs to the same native context.
2296 __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
2297 __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
2298 __ Ldr(x5, NativeContextMemOperand());
2299 __ Cmp(x4, x5);
2300 __ B(ne, &miss);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002301
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002302 // Initialize the call counter.
2303 __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
2304 __ Adds(x4, feedback_vector,
2305 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2306 __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002307
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002308 // Store the function. Use a stub since we need a frame for allocation.
2309 // x2 - vector
2310 // x3 - slot
2311 // x1 - function
2312 {
2313 FrameScope scope(masm, StackFrame::INTERNAL);
2314 CreateWeakCellStub create_stub(masm->isolate());
2315 __ Push(function);
2316 __ CallStub(&create_stub);
2317 __ Pop(function);
2318 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002319
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002320 __ B(&call_function);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002321
2322 // We are here because tracing is on or we encountered a MISS case we can't
2323 // handle here.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002324 __ bind(&miss);
2325 GenerateMiss(masm);
2326
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002327 __ B(&call);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002328}
2329
2330
2331void CallICStub::GenerateMiss(MacroAssembler* masm) {
2332 ASM_LOCATION("CallICStub[Miss]");
2333
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002334 FrameScope scope(masm, StackFrame::INTERNAL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002335
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002336 // Push the receiver and the function and feedback info.
2337 __ Push(x1, x2, x3);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002338
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002339 // Call the entry.
2340 __ CallRuntime(Runtime::kCallIC_Miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002341
  // Move the result to x1 and exit the internal frame.
2343 __ Mov(x1, x0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002344}
2345
2346
2347void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2348 // If the receiver is a smi trigger the non-string case.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002349 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2350 __ JumpIfSmi(object_, receiver_not_string_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002351
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002352 // Fetch the instance type of the receiver into result register.
2353 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2354 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002355
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002356 // If the receiver is not a string trigger the non-string case.
2357 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
2358 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002359
2360 // If the index is non-smi trigger the non-smi case.
2361 __ JumpIfNotSmi(index_, &index_not_smi_);
2362
2363 __ Bind(&got_smi_index_);
2364 // Check for index out of range.
2365 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
2366 __ Cmp(result_, Operand::UntagSmi(index_));
2367 __ B(ls, index_out_of_range_);
2368
2369 __ SmiUntag(index_);
2370
2371 StringCharLoadGenerator::Generate(masm,
2372 object_,
2373 index_.W(),
2374 result_,
2375 &call_runtime_);
2376 __ SmiTag(result_);
2377 __ Bind(&exit_);
2378}
2379
2380
2381void StringCharCodeAtGenerator::GenerateSlow(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002382 MacroAssembler* masm, EmbedMode embed_mode,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002383 const RuntimeCallHelper& call_helper) {
2384 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2385
2386 __ Bind(&index_not_smi_);
2387 // If index is a heap number, try converting it to an integer.
2388 __ JumpIfNotHeapNumber(index_, index_not_number_);
2389 call_helper.BeforeCall(masm);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002390 if (embed_mode == PART_OF_IC_HANDLER) {
2391 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2392 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2393 } else {
2394 // Save object_ on the stack and pass index_ as argument for runtime call.
2395 __ Push(object_, index_);
2396 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002397 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002398 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002399 } else {
2400 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2401 // NumberToSmi discards numbers that are not exact integers.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002402 __ CallRuntime(Runtime::kNumberToSmi);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002403 }
2404 // Save the conversion result before the pop instructions below
2405 // have a chance to overwrite it.
2406 __ Mov(index_, x0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002407 if (embed_mode == PART_OF_IC_HANDLER) {
2408 __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
2409 LoadWithVectorDescriptor::VectorRegister());
2410 } else {
2411 __ Pop(object_);
2412 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002413 // Reload the instance type.
2414 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2415 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2416 call_helper.AfterCall(masm);
2417
2418 // If index is still not a smi, it must be out of range.
2419 __ JumpIfNotSmi(index_, index_out_of_range_);
2420 // Otherwise, return to the fast path.
2421 __ B(&got_smi_index_);
2422
2423 // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
2425 // is too complex (e.g., when the string needs to be flattened).
2426 __ Bind(&call_runtime_);
2427 call_helper.BeforeCall(masm);
2428 __ SmiTag(index_);
2429 __ Push(object_, index_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002430 __ CallRuntime(Runtime::kStringCharCodeAtRT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002431 __ Mov(result_, x0);
2432 call_helper.AfterCall(masm);
2433 __ B(&exit_);
2434
2435 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2436}
2437
2438
2439void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2440 __ JumpIfNotSmi(code_, &slow_case_);
2441 __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
2442 __ B(hi, &slow_case_);
2443
2444 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2445 // At this point code register contains smi tagged one-byte char code.
2446 __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
2447 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2448 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
2449 __ Bind(&exit_);
2450}
2451
2452
2453void StringCharFromCodeGenerator::GenerateSlow(
2454 MacroAssembler* masm,
2455 const RuntimeCallHelper& call_helper) {
2456 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2457
2458 __ Bind(&slow_case_);
2459 call_helper.BeforeCall(masm);
2460 __ Push(code_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002461 __ CallRuntime(Runtime::kStringCharFromCode);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002462 __ Mov(result_, x0);
2463 call_helper.AfterCall(masm);
2464 __ B(&exit_);
2465
2466 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2467}
2468
2469
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002470void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2471 // Inputs are in x0 (lhs) and x1 (rhs).
2472 DCHECK_EQ(CompareICState::BOOLEAN, state());
2473 ASM_LOCATION("CompareICStub[Booleans]");
2474 Label miss;
2475
2476 __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2477 __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002478 if (!Token::IsEqualityOp(op())) {
2479 __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
2480 __ AssertSmi(x1);
2481 __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
2482 __ AssertSmi(x0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002483 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01002484 __ Sub(x0, x1, x0);
2485 __ Ret();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002486
2487 __ Bind(&miss);
2488 GenerateMiss(masm);
2489}
2490
2491
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002492void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2493 // Inputs are in x0 (lhs) and x1 (rhs).
2494 DCHECK(state() == CompareICState::SMI);
2495 ASM_LOCATION("CompareICStub[Smis]");
2496 Label miss;
2497 // Bail out (to 'miss') unless both x0 and x1 are smis.
2498 __ JumpIfEitherNotSmi(x0, x1, &miss);
2499
2500 if (GetCondition() == eq) {
2501 // For equality we do not care about the sign of the result.
2502 __ Sub(x0, x0, x1);
2503 } else {
2504 // Untag before subtracting to avoid handling overflow.
2505 __ SmiUntag(x1);
2506 __ Sub(x0, x1, Operand::UntagSmi(x0));
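    // Sketch of the reasoning: Smi payloads occupy the upper 32 bits on arm64,
    // so subtracting two tagged values could overflow 64 bits and flip the
    // sign. Subtracting the untagged 32-bit payloads in a 64-bit register
    // cannot overflow, so the sign of x0 is a reliable ordering result.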
2507 }
2508 __ Ret();
2509
2510 __ Bind(&miss);
2511 GenerateMiss(masm);
2512}
2513
2514
2515void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2516 DCHECK(state() == CompareICState::NUMBER);
2517 ASM_LOCATION("CompareICStub[HeapNumbers]");
2518
2519 Label unordered, maybe_undefined1, maybe_undefined2;
2520 Label miss, handle_lhs, values_in_d_regs;
2521 Label untag_rhs, untag_lhs;
2522
2523 Register result = x0;
2524 Register rhs = x0;
2525 Register lhs = x1;
2526 FPRegister rhs_d = d0;
2527 FPRegister lhs_d = d1;
2528
2529 if (left() == CompareICState::SMI) {
2530 __ JumpIfNotSmi(lhs, &miss);
2531 }
2532 if (right() == CompareICState::SMI) {
2533 __ JumpIfNotSmi(rhs, &miss);
2534 }
2535
2536 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
2537 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
2538
2539 // Load rhs if it's a heap number.
2540 __ JumpIfSmi(rhs, &handle_lhs);
2541 __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
2542 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
2543
2544 // Load lhs if it's a heap number.
2545 __ Bind(&handle_lhs);
2546 __ JumpIfSmi(lhs, &values_in_d_regs);
2547 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
2548 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
2549
2550 __ Bind(&values_in_d_regs);
2551 __ Fcmp(lhs_d, rhs_d);
2552 __ B(vs, &unordered); // Overflow flag set if either is NaN.
2553 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
2554 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
2555 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
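  // Illustrative trace: for lhs 1.5 and rhs 2.0 the Fcmp sets lt, so Cset
  // leaves 0 and Csinv (condition ge is false) writes ~xzr == -1, i.e. LESS.
  // For equal inputs gt is false and ge is true, so the result stays 0 (EQUAL).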
2556 __ Ret();
2557
2558 __ Bind(&unordered);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002559 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002560 CompareICState::GENERIC, CompareICState::GENERIC);
2561 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2562
2563 __ Bind(&maybe_undefined1);
2564 if (Token::IsOrderedRelationalCompareOp(op())) {
2565 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
2566 __ JumpIfSmi(lhs, &unordered);
2567 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
2568 __ B(&unordered);
2569 }
2570
2571 __ Bind(&maybe_undefined2);
2572 if (Token::IsOrderedRelationalCompareOp(op())) {
2573 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
2574 }
2575
2576 __ Bind(&miss);
2577 GenerateMiss(masm);
2578}
2579
2580
2581void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2582 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2583 ASM_LOCATION("CompareICStub[InternalizedStrings]");
2584 Label miss;
2585
2586 Register result = x0;
2587 Register rhs = x0;
2588 Register lhs = x1;
2589
2590 // Check that both operands are heap objects.
2591 __ JumpIfEitherSmi(lhs, rhs, &miss);
2592
2593 // Check that both operands are internalized strings.
2594 Register rhs_map = x10;
2595 Register lhs_map = x11;
2596 Register rhs_type = x10;
2597 Register lhs_type = x11;
2598 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2599 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2600 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
2601 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
2602
2603 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2604 __ Orr(x12, lhs_type, rhs_type);
2605 __ TestAndBranchIfAnySet(
2606 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
2607
2608 // Internalized strings are compared by identity.
2609 STATIC_ASSERT(EQUAL == 0);
2610 __ Cmp(lhs, rhs);
2611 __ Cset(result, ne);
2612 __ Ret();
2613
2614 __ Bind(&miss);
2615 GenerateMiss(masm);
2616}
2617
2618
2619void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2620 DCHECK(state() == CompareICState::UNIQUE_NAME);
2621 ASM_LOCATION("CompareICStub[UniqueNames]");
2622 DCHECK(GetCondition() == eq);
2623 Label miss;
2624
2625 Register result = x0;
2626 Register rhs = x0;
2627 Register lhs = x1;
2628
2629 Register lhs_instance_type = w2;
2630 Register rhs_instance_type = w3;
2631
2632 // Check that both operands are heap objects.
2633 __ JumpIfEitherSmi(lhs, rhs, &miss);
2634
2635 // Check that both operands are unique names. This leaves the instance
2636 // types loaded in tmp1 and tmp2.
2637 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
2638 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
2639 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2640 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
2641
2642 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
2643 // should have kInternalizedTag set.
2644 __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
2645 __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
2646
2647 // Unique names are compared by identity.
2648 STATIC_ASSERT(EQUAL == 0);
2649 __ Cmp(lhs, rhs);
2650 __ Cset(result, ne);
2651 __ Ret();
2652
2653 __ Bind(&miss);
2654 GenerateMiss(masm);
2655}
2656
2657
2658void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2659 DCHECK(state() == CompareICState::STRING);
2660 ASM_LOCATION("CompareICStub[Strings]");
2661
2662 Label miss;
2663
2664 bool equality = Token::IsEqualityOp(op());
2665
2666 Register result = x0;
2667 Register rhs = x0;
2668 Register lhs = x1;
2669
2670 // Check that both operands are heap objects.
2671 __ JumpIfEitherSmi(rhs, lhs, &miss);
2672
2673 // Check that both operands are strings.
2674 Register rhs_map = x10;
2675 Register lhs_map = x11;
2676 Register rhs_type = x10;
2677 Register lhs_type = x11;
2678 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2679 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2680 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
2681 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
2682 STATIC_ASSERT(kNotStringTag != 0);
2683 __ Orr(x12, lhs_type, rhs_type);
2684 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
2685
2686 // Fast check for identical strings.
2687 Label not_equal;
2688 __ Cmp(lhs, rhs);
2689 __ B(ne, &not_equal);
2690 __ Mov(result, EQUAL);
2691 __ Ret();
2692
2693 __ Bind(&not_equal);
2694 // Handle the case of non-identical strings.
2695
2696 // Check that both strings are internalized strings. If they are, we're done
2697 // because we already know they are not identical. We know they are both
2698 // strings.
2699 if (equality) {
2700 DCHECK(GetCondition() == eq);
2701 STATIC_ASSERT(kInternalizedTag == 0);
2702 Label not_internalized_strings;
2703 __ Orr(x12, lhs_type, rhs_type);
2704 __ TestAndBranchIfAnySet(
2705 x12, kIsNotInternalizedMask, &not_internalized_strings);
2706 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
2707 __ Ret();
2708 __ Bind(&not_internalized_strings);
2709 }
2710
2711 // Check that both strings are sequential one-byte.
2712 Label runtime;
2713 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
2714 x13, &runtime);
2715
2716 // Compare flat one-byte strings. Returns when done.
2717 if (equality) {
2718 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
2719 x12);
2720 } else {
2721 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
2722 x12, x13);
2723 }
2724
2725 // Handle more complex cases in runtime.
2726 __ Bind(&runtime);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002727 if (equality) {
Ben Murdochda12d292016-06-02 14:46:10 +01002728 {
2729 FrameScope scope(masm, StackFrame::INTERNAL);
2730 __ Push(lhs, rhs);
2731 __ CallRuntime(Runtime::kStringEqual);
2732 }
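    // kStringEqual returns the true or false root in x0. Subtracting the true
    // root yields 0 (EQUAL) for equal strings and a non-zero value otherwise,
    // which matches the convention expected by the equality callers (EQUAL == 0).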
2733 __ LoadRoot(x1, Heap::kTrueValueRootIndex);
2734 __ Sub(x0, x0, x1);
2735 __ Ret();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002736 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01002737 __ Push(lhs, rhs);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002738 __ TailCallRuntime(Runtime::kStringCompare);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002739 }
2740
2741 __ Bind(&miss);
2742 GenerateMiss(masm);
2743}
2744
2745
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002746void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2747 DCHECK_EQ(CompareICState::RECEIVER, state());
2748 ASM_LOCATION("CompareICStub[Receivers]");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002749
2750 Label miss;
2751
2752 Register result = x0;
2753 Register rhs = x0;
2754 Register lhs = x1;
2755
2756 __ JumpIfEitherSmi(rhs, lhs, &miss);
2757
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002758 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2759 __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
2760 __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002761
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002762 DCHECK_EQ(eq, GetCondition());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002763 __ Sub(result, rhs, lhs);
2764 __ Ret();
2765
2766 __ Bind(&miss);
2767 GenerateMiss(masm);
2768}
2769
2770
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002771void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2772 ASM_LOCATION("CompareICStub[KnownReceivers]");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002773
2774 Label miss;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002775 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002776
2777 Register result = x0;
2778 Register rhs = x0;
2779 Register lhs = x1;
2780
2781 __ JumpIfEitherSmi(rhs, lhs, &miss);
2782
2783 Register rhs_map = x10;
2784 Register lhs_map = x11;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002785 Register map = x12;
2786 __ GetWeakValue(map, cell);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002787 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2788 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002789 __ Cmp(rhs_map, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002790 __ B(ne, &miss);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002791 __ Cmp(lhs_map, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002792 __ B(ne, &miss);
2793
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002794 if (Token::IsEqualityOp(op())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002795 __ Sub(result, rhs, lhs);
2796 __ Ret();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002797 } else {
2798 Register ncr = x2;
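    // ncr is presumably the "NaN compare result": the value Runtime::kCompare
    // should produce when the operands cannot be ordered. Using GREATER for
    // LT/LTE and LESS for GT/GTE makes the relational test come out false.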
2799 if (op() == Token::LT || op() == Token::LTE) {
2800 __ Mov(ncr, Smi::FromInt(GREATER));
2801 } else {
2802 __ Mov(ncr, Smi::FromInt(LESS));
2803 }
2804 __ Push(lhs, rhs, ncr);
2805 __ TailCallRuntime(Runtime::kCompare);
2806 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002807
2808 __ Bind(&miss);
2809 GenerateMiss(masm);
2810}
2811
2812
2813// This method handles the case where a compare stub had the wrong
2814 // implementation. It calls a miss handler, which rewrites the stub. All other
2815// CompareICStub::Generate* methods should fall back into this one if their
2816// operands were not the expected types.
2817void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2818 ASM_LOCATION("CompareICStub[Miss]");
2819
2820 Register stub_entry = x11;
2821 {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002822 FrameScope scope(masm, StackFrame::INTERNAL);
2823 Register op = x10;
2824 Register left = x1;
2825 Register right = x0;
2826 // Preserve some caller-saved registers.
2827 __ Push(x1, x0, lr);
2828 // Push the arguments.
2829 __ Mov(op, Smi::FromInt(this->op()));
2830 __ Push(left, right, op);
2831
2832 // Call the miss handler. This also pops the arguments.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002833 __ CallRuntime(Runtime::kCompareIC_Miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002834
2835 // Compute the entry point of the rewritten stub.
2836 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
2837 // Restore caller-saved registers.
2838 __ Pop(lr, x0, x1);
2839 }
2840
2841 // Tail-call to the new stub.
2842 __ Jump(stub_entry);
2843}
2844
2845
2846void SubStringStub::Generate(MacroAssembler* masm) {
2847 ASM_LOCATION("SubStringStub::Generate");
2848 Label runtime;
2849
2850 // Stack frame on entry.
2851 // lr: return address
2852 // jssp[0]: substring "to" offset
2853 // jssp[8]: substring "from" offset
2854 // jssp[16]: pointer to string object
2855
2856 // This stub is called from the native call %_SubString(...), so
2857 // nothing can be assumed about the arguments. The code checks that:
2858 // "string" is a sequential string,
2859 // both "from" and "to" are smis, and
2860 // 0 <= from <= to <= string.length (in debug mode).
2861 // If any of these assumptions fail, we call the runtime system.
2862
2863 static const int kToOffset = 0 * kPointerSize;
2864 static const int kFromOffset = 1 * kPointerSize;
2865 static const int kStringOffset = 2 * kPointerSize;
2866
2867 Register to = x0;
2868 Register from = x15;
2869 Register input_string = x10;
2870 Register input_length = x11;
2871 Register input_type = x12;
2872 Register result_string = x0;
2873 Register result_length = x1;
2874 Register temp = x3;
2875
2876 __ Peek(to, kToOffset);
2877 __ Peek(from, kFromOffset);
2878
2879 // Check that both from and to are smis. If not, jump to runtime.
2880 __ JumpIfEitherNotSmi(from, to, &runtime);
2881 __ SmiUntag(from);
2882 __ SmiUntag(to);
2883
2884 // Calculate difference between from and to. If to < from, branch to runtime.
2885 __ Subs(result_length, to, from);
2886 __ B(mi, &runtime);
2887
2888 // Check that from is not negative.
2889 __ Tbnz(from, kWSignBit, &runtime);
2890
2891 // Make sure first argument is a string.
2892 __ Peek(input_string, kStringOffset);
2893 __ JumpIfSmi(input_string, &runtime);
2894 __ IsObjectJSStringType(input_string, input_type, &runtime);
2895
2896 Label single_char;
2897 __ Cmp(result_length, 1);
2898 __ B(eq, &single_char);
2899
2900 // Short-cut for the case of trivial substring.
2901 Label return_x0;
2902 __ Ldrsw(input_length,
2903 UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
2904
2905 __ Cmp(result_length, input_length);
2906 __ CmovX(x0, input_string, eq);
2907 // Return original string.
2908 __ B(eq, &return_x0);
2909
2910 // Longer than original string's length or negative: unsafe arguments.
2911 __ B(hi, &runtime);
2912
2913 // Shorter than original string's length: an actual substring.
2914
2915 // x0 to substring end character offset
2916 // x1 result_length length of substring result
2917 // x10 input_string pointer to input string object
2918 // x10 unpacked_string pointer to unpacked string object
2919 // x11 input_length length of input string
2920 // x12 input_type instance type of input string
2921 // x15 from substring start character offset
2922
2923 // Deal with different string types: update the index if necessary and put
2924 // the underlying string into register unpacked_string.
2925 Label underlying_unpacked, sliced_string, seq_or_external_string;
2926 Label update_instance_type;
2927 // If the string is not indirect, it can only be sequential or external.
2928 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
2929 STATIC_ASSERT(kIsIndirectStringMask != 0);
2930
2931 // Test for string types, and branch/fall through to appropriate unpacking
2932 // code.
2933 __ Tst(input_type, kIsIndirectStringMask);
2934 __ B(eq, &seq_or_external_string);
2935 __ Tst(input_type, kSlicedNotConsMask);
2936 __ B(ne, &sliced_string);
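  // Falling through here means the string is a cons string: the indirect bit
  // is set but the sliced-not-cons bit is clear.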
2937
2938 Register unpacked_string = input_string;
2939
2940 // Cons string. Check whether it is flat, then fetch first part.
2941 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
2942 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
2943 __ Ldr(unpacked_string,
2944 FieldMemOperand(input_string, ConsString::kFirstOffset));
2945 __ B(&update_instance_type);
2946
2947 __ Bind(&sliced_string);
2948 // Sliced string. Fetch parent and correct start index by offset.
2949 __ Ldrsw(temp,
2950 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
2951 __ Add(from, from, temp);
2952 __ Ldr(unpacked_string,
2953 FieldMemOperand(input_string, SlicedString::kParentOffset));
2954
2955 __ Bind(&update_instance_type);
2956 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
2957 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2958 // Control must now reach &underlying_unpacked. Since no code is generated
2959 // before that label, we fall through instead of emitting a useless branch.
2960
2961 __ Bind(&seq_or_external_string);
2962 // Sequential or external string. Registers unpacked_string and input_string
2963 // alias, so there's nothing to do here.
2964 // Note that if code is added here, the above code must be updated.
2965
2966 // x0 result_string pointer to result string object (uninit)
2967 // x1 result_length length of substring result
2968 // x10 unpacked_string pointer to unpacked string object
2969 // x11 input_length length of input string
2970 // x12 input_type instance type of input string
2971 // x15 from substring start character offset
2972 __ Bind(&underlying_unpacked);
2973
2974 if (FLAG_string_slices) {
2975 Label copy_routine;
2976 __ Cmp(result_length, SlicedString::kMinLength);
2977 // Short slice. Copy instead of slicing.
2978 __ B(lt, &copy_routine);
2979 // Allocate new sliced string. At this point we do not reload the instance
2980 // type including the string encoding because we simply rely on the info
2981 // provided by the original string. It does not matter if the original
2982 // string's encoding is wrong because we always have to recheck encoding of
2983 // the newly created string's parent anyway due to externalized strings.
2984 Label two_byte_slice, set_slice_header;
2985 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
2986 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
2987 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
2988 __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
2989 &runtime);
2990 __ B(&set_slice_header);
2991
2992 __ Bind(&two_byte_slice);
2993 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
2994 &runtime);
2995
2996 __ Bind(&set_slice_header);
2997 __ SmiTag(from);
2998 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
2999 __ Str(unpacked_string,
3000 FieldMemOperand(result_string, SlicedString::kParentOffset));
3001 __ B(&return_x0);
3002
3003 __ Bind(&copy_routine);
3004 }
3005
3006 // x0 result_string pointer to result string object (uninit)
3007 // x1 result_length length of substring result
3008 // x10 unpacked_string pointer to unpacked string object
3009 // x11 input_length length of input string
3010 // x12 input_type instance type of input string
3011 // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
3012 // x13 substring_char0 pointer to first char of substring (uninit)
3013 // x14 result_char0 pointer to first char of result (uninit)
3014 // x15 from substring start character offset
3015 Register unpacked_char0 = x13;
3016 Register substring_char0 = x13;
3017 Register result_char0 = x14;
3018 Label two_byte_sequential, sequential_string, allocate_result;
3019 STATIC_ASSERT(kExternalStringTag != 0);
3020 STATIC_ASSERT(kSeqStringTag == 0);
3021
3022 __ Tst(input_type, kExternalStringTag);
3023 __ B(eq, &sequential_string);
3024
3025 __ Tst(input_type, kShortExternalStringTag);
3026 __ B(ne, &runtime);
3027 __ Ldr(unpacked_char0,
3028 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
3029 // unpacked_char0 points to the first character of the underlying string.
3030 __ B(&allocate_result);
3031
3032 __ Bind(&sequential_string);
3033 // Locate first character of underlying subject string.
3034 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3035 __ Add(unpacked_char0, unpacked_string,
3036 SeqOneByteString::kHeaderSize - kHeapObjectTag);
3037
3038 __ Bind(&allocate_result);
3039 // Sequential one-byte string. Allocate the result.
3040 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3041 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
3042
3043 // Allocate and copy the resulting one-byte string.
3044 __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
3045
3046 // Locate first character of substring to copy.
3047 __ Add(substring_char0, unpacked_char0, from);
3048
3049 // Locate first character of result.
3050 __ Add(result_char0, result_string,
3051 SeqOneByteString::kHeaderSize - kHeapObjectTag);
3052
3053 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3054 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
3055 __ B(&return_x0);
3056
3057 // Allocate and copy the resulting two-byte string.
3058 __ Bind(&two_byte_sequential);
3059 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
3060
3061 // Locate first character of substring to copy.
3062 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
3063
3064 // Locate first character of result.
3065 __ Add(result_char0, result_string,
3066 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3067
3068 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
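  // CopyBytes copies byte counts, so double the character count for the
  // two-byte encoding.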
3069 __ Add(result_length, result_length, result_length);
3070 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
3071
3072 __ Bind(&return_x0);
3073 Counters* counters = isolate()->counters();
3074 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
3075 __ Drop(3);
3076 __ Ret();
3077
3078 __ Bind(&runtime);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003079 __ TailCallRuntime(Runtime::kSubString);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003080
3081 __ bind(&single_char);
3082 // x1: result_length
3083 // x10: input_string
3084 // x12: input_type
3085 // x15: from (untagged)
3086 __ SmiTag(from);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003087 StringCharAtGenerator generator(input_string, from, result_length, x0,
3088 &runtime, &runtime, &runtime,
3089 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003090 generator.GenerateFast(masm);
3091 __ Drop(3);
3092 __ Ret();
3093 generator.SkipSlow(masm, &runtime);
3094}
3095
3096
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003097void ToNumberStub::Generate(MacroAssembler* masm) {
3098 // The ToNumber stub takes one argument in x0.
3099 Label not_smi;
3100 __ JumpIfNotSmi(x0, &not_smi);
3101 __ Ret();
3102 __ Bind(&not_smi);
3103
3104 Label not_heap_number;
Ben Murdochda12d292016-06-02 14:46:10 +01003105 __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
3106 // x0: receiver
3107 // x1: receiver instance type
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003108 __ B(ne, &not_heap_number);
3109 __ Ret();
3110 __ Bind(&not_heap_number);
3111
Ben Murdochda12d292016-06-02 14:46:10 +01003112 NonNumberToNumberStub stub(masm->isolate());
3113 __ TailCallStub(&stub);
3114}
3115
3116void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
3117 // The NonNumberToNumber stub takes one argument in x0.
3118 __ AssertNotNumber(x0);
3119
3120 Label not_string;
3121 __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
3122 // x0: receiver
3123 // x1: receiver instance type
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003124 __ B(hs, &not_string);
Ben Murdochda12d292016-06-02 14:46:10 +01003125 StringToNumberStub stub(masm->isolate());
3126 __ TailCallStub(&stub);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003127 __ Bind(&not_string);
3128
3129 Label not_oddball;
3130 __ Cmp(x1, ODDBALL_TYPE);
3131 __ B(ne, &not_oddball);
3132 __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
3133 __ Ret();
3134 __ Bind(&not_oddball);
3135
3136 __ Push(x0); // Push argument.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003137 __ TailCallRuntime(Runtime::kToNumber);
3138}
3139
Ben Murdochda12d292016-06-02 14:46:10 +01003140void StringToNumberStub::Generate(MacroAssembler* masm) {
3141 // The StringToNumber stub takes one argument in x0.
3142 __ AssertString(x0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003143
Ben Murdochda12d292016-06-02 14:46:10 +01003144 // Check if string has a cached array index.
3145 Label runtime;
3146 __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
3147 __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
3148 __ B(ne, &runtime);
3149 __ IndexFromHash(x2, x0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003150 __ Ret();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003151
Ben Murdochda12d292016-06-02 14:46:10 +01003152 __ Bind(&runtime);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003153 __ Push(x0); // Push argument.
Ben Murdochda12d292016-06-02 14:46:10 +01003154 __ TailCallRuntime(Runtime::kStringToNumber);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003155}
3156
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003157void ToStringStub::Generate(MacroAssembler* masm) {
3158 // The ToString stub takes one argument in x0.
3159 Label is_number;
3160 __ JumpIfSmi(x0, &is_number);
3161
3162 Label not_string;
3163 __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, &not_string, hs);
3164 // x0: receiver
3165 // x1: receiver instance type
3166 __ Ret();
3167 __ Bind(&not_string);
3168
3169 Label not_heap_number;
3170 __ Cmp(x1, HEAP_NUMBER_TYPE);
3171 __ B(ne, &not_heap_number);
3172 __ Bind(&is_number);
3173 NumberToStringStub stub(isolate());
3174 __ TailCallStub(&stub);
3175 __ Bind(&not_heap_number);
3176
3177 Label not_oddball;
3178 __ Cmp(x1, ODDBALL_TYPE);
3179 __ B(ne, &not_oddball);
3180 __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
3181 __ Ret();
3182 __ Bind(&not_oddball);
3183
3184 __ Push(x0); // Push argument.
3185 __ TailCallRuntime(Runtime::kToString);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003186}
3187
3188
Ben Murdoch097c5b22016-05-18 11:27:45 +01003189void ToNameStub::Generate(MacroAssembler* masm) {
3190 // The ToName stub takes one argument in x0.
3191 Label is_number;
3192 __ JumpIfSmi(x0, &is_number);
3193
3194 Label not_name;
3195 STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
3196 __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
3197 // x0: receiver
3198 // x1: receiver instance type
3199 __ Ret();
3200 __ Bind(&not_name);
3201
3202 Label not_heap_number;
3203 __ Cmp(x1, HEAP_NUMBER_TYPE);
3204 __ B(ne, &not_heap_number);
3205 __ Bind(&is_number);
3206 NumberToStringStub stub(isolate());
3207 __ TailCallStub(&stub);
3208 __ Bind(&not_heap_number);
3209
3210 Label not_oddball;
3211 __ Cmp(x1, ODDBALL_TYPE);
3212 __ B(ne, &not_oddball);
3213 __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
3214 __ Ret();
3215 __ Bind(&not_oddball);
3216
3217 __ Push(x0); // Push argument.
3218 __ TailCallRuntime(Runtime::kToName);
3219}
3220
3221
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003222void StringHelper::GenerateFlatOneByteStringEquals(
3223 MacroAssembler* masm, Register left, Register right, Register scratch1,
3224 Register scratch2, Register scratch3) {
3225 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
3226 Register result = x0;
3227 Register left_length = scratch1;
3228 Register right_length = scratch2;
3229
3230 // Compare lengths. If lengths differ, strings can't be equal. Lengths are
3231 // smis, and don't need to be untagged.
3232 Label strings_not_equal, check_zero_length;
3233 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
3234 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
3235 __ Cmp(left_length, right_length);
3236 __ B(eq, &check_zero_length);
3237
3238 __ Bind(&strings_not_equal);
3239 __ Mov(result, Smi::FromInt(NOT_EQUAL));
3240 __ Ret();
3241
3242 // Check if the length is zero. If so, the strings must be equal (and empty).
3243 Label compare_chars;
3244 __ Bind(&check_zero_length);
3245 STATIC_ASSERT(kSmiTag == 0);
3246 __ Cbnz(left_length, &compare_chars);
3247 __ Mov(result, Smi::FromInt(EQUAL));
3248 __ Ret();
3249
3250 // Compare characters. Falls through if all characters are equal.
3251 __ Bind(&compare_chars);
3252 GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
3253 scratch3, &strings_not_equal);
3254
3255 // Characters in strings are equal.
3256 __ Mov(result, Smi::FromInt(EQUAL));
3257 __ Ret();
3258}
3259
3260
3261void StringHelper::GenerateCompareFlatOneByteStrings(
3262 MacroAssembler* masm, Register left, Register right, Register scratch1,
3263 Register scratch2, Register scratch3, Register scratch4) {
3264 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
3265 Label result_not_equal, compare_lengths;
3266
3267 // Find minimum length and length difference.
3268 Register length_delta = scratch3;
3269 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3270 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3271 __ Subs(length_delta, scratch1, scratch2);
3272
3273 Register min_length = scratch1;
3274 __ Csel(min_length, scratch2, scratch1, gt);
3275 __ Cbz(min_length, &compare_lengths);
3276
3277 // Compare loop.
3278 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3279 scratch4, &result_not_equal);
3280
3281 // Compare lengths - strings up to min-length are equal.
3282 __ Bind(&compare_lengths);
3283
3284 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3285
3286 // Use length_delta as result if it's zero.
3287 Register result = x0;
3288 __ Subs(result, length_delta, 0);
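  // The conditional moves below key off the flags set either here or by the
  // character comparison that branched to result_not_equal. If length_delta is
  // zero, neither move fires and the result stays EQUAL (0).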
3289
3290 __ Bind(&result_not_equal);
3291 Register greater = x10;
3292 Register less = x11;
3293 __ Mov(greater, Smi::FromInt(GREATER));
3294 __ Mov(less, Smi::FromInt(LESS));
3295 __ CmovX(result, greater, gt);
3296 __ CmovX(result, less, lt);
3297 __ Ret();
3298}
3299
3300
3301void StringHelper::GenerateOneByteCharsCompareLoop(
3302 MacroAssembler* masm, Register left, Register right, Register length,
3303 Register scratch1, Register scratch2, Label* chars_not_equal) {
3304 DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
3305
3306 // Change index to run from -length to -1 by adding length to string
3307 // start. This means that the loop ends when index reaches zero, which
3308 // doesn't need an additional compare.
3309 __ SmiUntag(length);
3310 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
3311 __ Add(left, left, scratch1);
3312 __ Add(right, right, scratch1);
3313
3314 Register index = length;
3315 __ Neg(index, length); // index = -length;
3316
3317 // Compare loop
3318 Label loop;
3319 __ Bind(&loop);
3320 __ Ldrb(scratch1, MemOperand(left, index));
3321 __ Ldrb(scratch2, MemOperand(right, index));
3322 __ Cmp(scratch1, scratch2);
3323 __ B(ne, chars_not_equal);
3324 __ Add(index, index, 1);
3325 __ Cbnz(index, &loop);
3326}
3327
3328
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003329void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3330 // ----------- S t a t e -------------
3331 // -- x1 : left
3332 // -- x0 : right
3333 // -- lr : return address
3334 // -----------------------------------
3335
3336 // Load x2 with the allocation site. We stick an undefined dummy value here
3337 // and replace it with the real allocation site later when we instantiate this
3338 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3339 __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
3340
3341 // Make sure that we actually patched the allocation site.
3342 if (FLAG_debug_code) {
3343 __ AssertNotSmi(x2, kExpectedAllocationSite);
3344 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
3345 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
3346 kExpectedAllocationSite);
3347 }
3348
3349 // Tail call into the stub that handles binary operations with allocation
3350 // sites.
3351 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3352 __ TailCallStub(&stub);
3353}
3354
3355
3356void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3357 // We need some extra registers for this stub. They have been allocated,
3358 // but we need to save them before using them.
3359 regs_.Save(masm);
3360
3361 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3362 Label dont_need_remembered_set;
3363
3364 Register val = regs_.scratch0();
3365 __ Ldr(val, MemOperand(regs_.address()));
3366 __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
3367
Ben Murdoch097c5b22016-05-18 11:27:45 +01003368 __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003369
3370 // First notify the incremental marker if necessary, then update the
3371 // remembered set.
3372 CheckNeedsToInformIncrementalMarker(
3373 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3374 InformIncrementalMarker(masm);
3375 regs_.Restore(masm); // Restore the extra scratch registers we used.
3376
3377 __ RememberedSetHelper(object(), address(),
3378 value(), // scratch1
3379 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
3380
3381 __ Bind(&dont_need_remembered_set);
3382 }
3383
3384 CheckNeedsToInformIncrementalMarker(
3385 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3386 InformIncrementalMarker(masm);
3387 regs_.Restore(masm); // Restore the extra scratch registers we used.
3388 __ Ret();
3389}
3390
3391
3392void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3393 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3394 Register address =
3395 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
3396 DCHECK(!address.Is(regs_.object()));
3397 DCHECK(!address.Is(x0));
3398 __ Mov(address, regs_.address());
3399 __ Mov(x0, regs_.object());
3400 __ Mov(x1, address);
3401 __ Mov(x2, ExternalReference::isolate_address(isolate()));
3402
3403 AllowExternalCallThatCantCauseGC scope(masm);
3404 ExternalReference function =
3405 ExternalReference::incremental_marking_record_write_function(
3406 isolate());
3407 __ CallCFunction(function, 3, 0);
3408
3409 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3410}
3411
3412
3413void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3414 MacroAssembler* masm,
3415 OnNoNeedToInformIncrementalMarker on_no_need,
3416 Mode mode) {
3417 Label on_black;
3418 Label need_incremental;
3419 Label need_incremental_pop_scratch;
3420
3421 Register mem_chunk = regs_.scratch0();
3422 Register counter = regs_.scratch1();
3423 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
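  // Clearing the page-alignment bits yields the MemoryChunk header. Each chunk
  // keeps a write barrier counter; when it drops below zero we skip the quick
  // checks below and go inform the incremental marker.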
3424 __ Ldr(counter,
3425 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
3426 __ Subs(counter, counter, 1);
3427 __ Str(counter,
3428 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
3429 __ B(mi, &need_incremental);
3430
3431 // If the object is not black we don't have to inform the incremental marker.
3432 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
3433
3434 regs_.Restore(masm); // Restore the extra scratch registers we used.
3435 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3436 __ RememberedSetHelper(object(), address(),
3437 value(), // scratch1
3438 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
3439 } else {
3440 __ Ret();
3441 }
3442
3443 __ Bind(&on_black);
3444 // Get the value from the slot.
3445 Register val = regs_.scratch0();
3446 __ Ldr(val, MemOperand(regs_.address()));
3447
3448 if (mode == INCREMENTAL_COMPACTION) {
3449 Label ensure_not_white;
3450
3451 __ CheckPageFlagClear(val, regs_.scratch1(),
3452 MemoryChunk::kEvacuationCandidateMask,
3453 &ensure_not_white);
3454
3455 __ CheckPageFlagClear(regs_.object(),
3456 regs_.scratch1(),
3457 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
3458 &need_incremental);
3459
3460 __ Bind(&ensure_not_white);
3461 }
3462
3463 // We need extra registers for this, so we push the object and the address
3464 // register temporarily.
3465 __ Push(regs_.address(), regs_.object());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003466 __ JumpIfWhite(val,
3467 regs_.scratch1(), // Scratch.
3468 regs_.object(), // Scratch.
3469 regs_.address(), // Scratch.
3470 regs_.scratch2(), // Scratch.
3471 &need_incremental_pop_scratch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003472 __ Pop(regs_.object(), regs_.address());
3473
3474 regs_.Restore(masm); // Restore the extra scratch registers we used.
3475 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3476 __ RememberedSetHelper(object(), address(),
3477 value(), // scratch1
3478 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
3479 } else {
3480 __ Ret();
3481 }
3482
3483 __ Bind(&need_incremental_pop_scratch);
3484 __ Pop(regs_.object(), regs_.address());
3485
3486 __ Bind(&need_incremental);
3487 // Fall through when we need to inform the incremental marker.
3488}
3489
3490
3491void RecordWriteStub::Generate(MacroAssembler* masm) {
3492 Label skip_to_incremental_noncompacting;
3493 Label skip_to_incremental_compacting;
3494
3495 // We patch these two first instructions back and forth between a nop and
3496 // real branch when we start and stop incremental heap marking.
3497 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
3498 // are generated.
3499 // See RecordWriteStub::Patch for details.
3500 {
3501 InstructionAccurateScope scope(masm, 2);
3502 __ adr(xzr, &skip_to_incremental_noncompacting);
3503 __ adr(xzr, &skip_to_incremental_compacting);
3504 }
3505
3506 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3507 __ RememberedSetHelper(object(), address(),
3508 value(), // scratch1
3509 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
3510 }
3511 __ Ret();
3512
3513 __ Bind(&skip_to_incremental_noncompacting);
3514 GenerateIncremental(masm, INCREMENTAL);
3515
3516 __ Bind(&skip_to_incremental_compacting);
3517 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3518}
3519
3520
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003521void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
3522 CEntryStub ces(isolate(), 1, kSaveFPRegs);
3523 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
3524 int parameter_count_offset =
Ben Murdochda12d292016-06-02 14:46:10 +01003525 StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003526 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
3527 if (function_mode() == JS_FUNCTION_STUB_MODE) {
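    // In JS function stub mode one extra stack slot (presumably the receiver)
    // sits above the arguments, so drop it as well.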
3528 __ Add(x1, x1, 1);
3529 }
3530 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
3531 __ Drop(x1);
3532 // Return to IC Miss stub, continuation still on stack.
3533 __ Ret();
3534}
3535
3536
3537void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003538 __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
3539 LoadICStub stub(isolate(), state());
3540 stub.GenerateForTrampoline(masm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003541}
3542
3543
3544void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003545 __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
3546 KeyedLoadICStub stub(isolate(), state());
3547 stub.GenerateForTrampoline(masm);
3548}
3549
3550
3551void CallICTrampolineStub::Generate(MacroAssembler* masm) {
3552 __ EmitLoadTypeFeedbackVector(x2);
3553 CallICStub stub(isolate(), state());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003554 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3555}
3556
3557
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003558void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
3559
3560
3561void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
3562 GenerateImpl(masm, true);
3563}
3564
3565
3566static void HandleArrayCases(MacroAssembler* masm, Register feedback,
3567 Register receiver_map, Register scratch1,
3568 Register scratch2, bool is_polymorphic,
3569 Label* miss) {
3570 // feedback initially contains the feedback array
3571 Label next_loop, prepare_next;
3572 Label load_smi_map, compare_map;
3573 Label start_polymorphic;
3574
3575 Register cached_map = scratch1;
3576
3577 __ Ldr(cached_map,
3578 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
3579 __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3580 __ Cmp(receiver_map, cached_map);
3581 __ B(ne, &start_polymorphic);
3582 // Found; now call the handler.
3583 Register handler = feedback;
3584 __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
3585 __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
3586 __ Jump(feedback);
3587
3588 Register length = scratch2;
3589 __ Bind(&start_polymorphic);
3590 __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3591 if (!is_polymorphic) {
3592 __ Cmp(length, Operand(Smi::FromInt(2)));
3593 __ B(eq, miss);
3594 }
3595
3596 Register too_far = length;
3597 Register pointer_reg = feedback;
3598
3599 // +-----+------+------+-----+-----+ ... ----+
3600 // | map | len | wm0 | h0 | wm1 | hN |
3601 // +-----+------+------+-----+-----+ ... ----+
3602 // 0 1 2 len-1
3603 // ^ ^
3604 // | |
3605 // pointer_reg too_far
3606 // aka feedback scratch2
3607 // also need receiver_map
3608 // use cached_map (scratch1) to look in the weak map values.
3609 __ Add(too_far, feedback,
3610 Operand::UntagSmiAndScale(length, kPointerSizeLog2));
3611 __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
3612 __ Add(pointer_reg, feedback,
3613 FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
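  // pointer_reg now addresses the first cached (weak map, handler) pair and
  // too_far is one element past the end of the feedback array.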
3614
3615 __ Bind(&next_loop);
3616 __ Ldr(cached_map, MemOperand(pointer_reg));
3617 __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3618 __ Cmp(receiver_map, cached_map);
3619 __ B(ne, &prepare_next);
3620 __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
3621 __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
3622 __ Jump(handler);
3623
3624 __ Bind(&prepare_next);
3625 __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
3626 __ Cmp(pointer_reg, too_far);
3627 __ B(lt, &next_loop);
3628
3629 // We exhausted our array of map handler pairs.
3630 __ jmp(miss);
3631}
3632
3633
3634static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
3635 Register receiver_map, Register feedback,
3636 Register vector, Register slot,
3637 Register scratch, Label* compare_map,
3638 Label* load_smi_map, Label* try_array) {
3639 __ JumpIfSmi(receiver, load_smi_map);
3640 __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
3641 __ bind(compare_map);
3642 Register cached_map = scratch;
3643 // Move the weak map into the weak_cell register.
3644 __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
3645 __ Cmp(cached_map, receiver_map);
3646 __ B(ne, try_array);
3647
3648 Register handler = feedback;
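  // The handler is stored in the feedback vector slot that follows the weak
  // cell, hence the extra kPointerSize in the load below.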
3649 __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3650 __ Ldr(handler,
3651 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
3652 __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
3653 __ Jump(handler);
3654}
3655
3656
3657void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3658 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
3659 Register name = LoadWithVectorDescriptor::NameRegister(); // x2
3660 Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
3661 Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
3662 Register feedback = x4;
3663 Register receiver_map = x5;
3664 Register scratch1 = x6;
3665
3666 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3667 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3668
3669 // Try to quickly handle the monomorphic case without knowing for sure
3670 // if we have a weak cell in feedback. We do know it's safe to look
3671 // at WeakCell::kValueOffset.
3672 Label try_array, load_smi_map, compare_map;
3673 Label not_array, miss;
3674 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3675 scratch1, &compare_map, &load_smi_map, &try_array);
3676
3677 // Is it a fixed array?
3678 __ Bind(&try_array);
3679 __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3680 __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
3681 HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
3682
3683 __ Bind(&not_array);
3684 __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
Ben Murdochc5610432016-08-08 18:44:38 +01003685 Code::Flags code_flags =
3686 Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003687 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
3688 receiver, name, feedback,
3689 receiver_map, scratch1, x7);
3690
3691 __ Bind(&miss);
3692 LoadIC::GenerateMiss(masm);
3693
3694 __ Bind(&load_smi_map);
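  // A smi receiver is handled like a heap number: pretend its map is the
  // heap number map and redo the map comparison above.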
3695 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3696 __ jmp(&compare_map);
3697}
3698
3699
3700void KeyedLoadICStub::Generate(MacroAssembler* masm) {
3701 GenerateImpl(masm, false);
3702}
3703
3704
3705void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
3706 GenerateImpl(masm, true);
3707}
3708
3709
3710void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3711 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
3712 Register key = LoadWithVectorDescriptor::NameRegister(); // x2
3713 Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
3714 Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
3715 Register feedback = x4;
3716 Register receiver_map = x5;
3717 Register scratch1 = x6;
3718
3719 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3720 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3721
3722 // Try to quickly handle the monomorphic case without knowing for sure
3723 // if we have a weak cell in feedback. We do know it's safe to look
3724 // at WeakCell::kValueOffset.
3725 Label try_array, load_smi_map, compare_map;
3726 Label not_array, miss;
3727 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3728 scratch1, &compare_map, &load_smi_map, &try_array);
3729
3730 __ Bind(&try_array);
3731 // Is it a fixed array?
3732 __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3733 __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
3734
3735 // We have a polymorphic element handler.
3736 Label polymorphic, try_poly_name;
3737 __ Bind(&polymorphic);
3738 HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
3739
3740 __ Bind(&not_array);
3741 // Is it generic?
3742 __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
3743 &try_poly_name);
3744 Handle<Code> megamorphic_stub =
3745 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3746 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3747
3748 __ Bind(&try_poly_name);
3749 // We might have a name in feedback, and a fixed array in the next slot.
3750 __ Cmp(key, feedback);
3751 __ B(ne, &miss);
3752 // If the name comparison succeeded, we know we have a fixed array with
3753 // at least one map/handler pair.
3754 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3755 __ Ldr(feedback,
3756 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3757 HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
3758
3759 __ Bind(&miss);
3760 KeyedLoadIC::GenerateMiss(masm);
3761
3762 __ Bind(&load_smi_map);
3763 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3764 __ jmp(&compare_map);
3765}
3766
3767
3768void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3769 __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
3770 VectorStoreICStub stub(isolate(), state());
3771 stub.GenerateForTrampoline(masm);
3772}
3773
3774
3775void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3776 __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
3777 VectorKeyedStoreICStub stub(isolate(), state());
3778 stub.GenerateForTrampoline(masm);
3779}
3780
3781
3782void VectorStoreICStub::Generate(MacroAssembler* masm) {
3783 GenerateImpl(masm, false);
3784}
3785
3786
3787void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3788 GenerateImpl(masm, true);
3789}
3790
3791
3792void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3793 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
3794 Register key = VectorStoreICDescriptor::NameRegister(); // x2
3795 Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
3796 Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
3797 DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
3798 Register feedback = x5;
3799 Register receiver_map = x6;
3800 Register scratch1 = x7;
3801
3802 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3803 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3804
3805 // Try to quickly handle the monomorphic case without knowing for sure
3806 // if we have a weak cell in feedback. We do know it's safe to look
3807 // at WeakCell::kValueOffset.
3808 Label try_array, load_smi_map, compare_map;
3809 Label not_array, miss;
3810 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3811 scratch1, &compare_map, &load_smi_map, &try_array);
3812
3813 // Is it a fixed array?
3814 __ Bind(&try_array);
3815 __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3816 __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
3817 HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);
3818
3819 __ Bind(&not_array);
3820 __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
Ben Murdochc5610432016-08-08 18:44:38 +01003821 Code::Flags code_flags =
3822 Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003823 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
3824 receiver, key, feedback,
3825 receiver_map, scratch1, x8);
3826
3827 __ Bind(&miss);
3828 StoreIC::GenerateMiss(masm);
3829
3830 __ Bind(&load_smi_map);
3831 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3832 __ jmp(&compare_map);
3833}
3834
3835
3836void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
3837 GenerateImpl(masm, false);
3838}
3839
3840
3841void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3842 GenerateImpl(masm, true);
3843}
3844
3845
3846static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
3847 Register receiver_map, Register scratch1,
3848 Register scratch2, Label* miss) {
3849 // feedback initially contains the feedback array
3850 Label next_loop, prepare_next;
3851 Label start_polymorphic;
3852 Label transition_call;
3853
3854 Register cached_map = scratch1;
3855 Register too_far = scratch2;
3856 Register pointer_reg = feedback;
3857
3858 __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3859
3860 // +-----+------+------+-----+-----+-----+ ... ----+
3861 // | map | len | wm0 | wt0 | h0 | wm1 | hN |
3862 // +-----+------+------+-----+-----+-----+ ... ----+
3863 // 0 1 2 len-1
3864 // ^ ^
3865 // | |
3866 // pointer_reg too_far
3867 // aka feedback scratch2
3868 // also need receiver_map
3869 // use cached_map (scratch1) to look in the weak map values.
3870 __ Add(too_far, feedback,
3871 Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
3872 __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
3873 __ Add(pointer_reg, feedback,
3874 FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
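  // Entries here are (weak map, transition map or undefined, handler) triples,
  // which is why the loop below steps pointer_reg by 3 * kPointerSize.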
3875
3876 __ Bind(&next_loop);
3877 __ Ldr(cached_map, MemOperand(pointer_reg));
3878 __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3879 __ Cmp(receiver_map, cached_map);
3880 __ B(ne, &prepare_next);
3881 // Is it a transitioning store?
3882 __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
3883 __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
3884 __ B(ne, &transition_call);
3885
3886 __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
3887 __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
3888 __ Jump(pointer_reg);
3889
3890 __ Bind(&transition_call);
3891 __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
3892 __ JumpIfSmi(too_far, miss);
3893
3894 __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
3895 // Load the map into the correct register.
3896 DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
3897 __ mov(feedback, too_far);
3898 __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
3899 __ Jump(receiver_map);
3900
3901 __ Bind(&prepare_next);
3902 __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
3903 __ Cmp(pointer_reg, too_far);
3904 __ B(lt, &next_loop);
3905
3906 // We exhausted our array of map handler pairs.
3907 __ jmp(miss);
3908}
3909
3910
3911void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3912 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
3913 Register key = VectorStoreICDescriptor::NameRegister(); // x2
3914 Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
3915 Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
3916 DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
3917 Register feedback = x5;
3918 Register receiver_map = x6;
3919 Register scratch1 = x7;
3920
3921 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3922 __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3923
3924 // Try to quickly handle the monomorphic case without knowing for sure
3925 // if we have a weak cell in feedback. We do know it's safe to look
3926 // at WeakCell::kValueOffset.
3927 Label try_array, load_smi_map, compare_map;
3928 Label not_array, miss;
3929 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3930 scratch1, &compare_map, &load_smi_map, &try_array);
3931
3932 __ Bind(&try_array);
3933 // Is it a fixed array?
3934 __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3935 __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
3936
3937 // We have a polymorphic element handler.
3938 Label try_poly_name;
3939 HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
3940
3941 __ Bind(&not_array);
3942 // Is it generic?
3943 __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
3944 &try_poly_name);
3945 Handle<Code> megamorphic_stub =
3946 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3947 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3948
3949 __ Bind(&try_poly_name);
3950 // We might have a name in feedback, and a fixed array in the next slot.
3951 __ Cmp(key, feedback);
3952 __ B(ne, &miss);
3953 // If the name comparison succeeded, we know we have a fixed array with
3954 // at least one map/handler pair.
3955 __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
3956 __ Ldr(feedback,
3957 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3958 HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
3959
3960 __ Bind(&miss);
3961 KeyedStoreIC::GenerateMiss(masm);
3962
3963 __ Bind(&load_smi_map);
3964 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3965 __ jmp(&compare_map);
3966}
3967
3968
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003969// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
3970// a "Push lr" instruction, followed by a call.
3971static const unsigned int kProfileEntryHookCallSize =
3972 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003973
3974
3975void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
3976 if (masm->isolate()->function_entry_hook() != NULL) {
3977 ProfileEntryHookStub stub(masm->isolate());
3978 Assembler::BlockConstPoolScope no_const_pools(masm);
3979 DontEmitDebugCodeScope no_debug_code(masm);
3980 Label entry_hook_call_start;
3981 __ Bind(&entry_hook_call_start);
3982 __ Push(lr);
3983 __ CallStub(&stub);
3984 DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003985 kProfileEntryHookCallSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003986
3987 __ Pop(lr);
3988 }
3989}
3990
3991
3992void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
3993 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
3994
3995 // Save all kCallerSaved registers (including lr), since this can be called
3996 // from anywhere.
3997 // TODO(jbramley): What about FP registers?
3998 __ PushCPURegList(kCallerSaved);
3999 DCHECK(kCallerSaved.IncludesAliasOf(lr));
4000 const int kNumSavedRegs = kCallerSaved.Count();
4001
4002 // Compute the function's address as the first argument.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004003 __ Sub(x0, lr, kProfileEntryHookCallSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004004
4005#if V8_HOST_ARCH_ARM64
4006 uintptr_t entry_hook =
4007 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
4008 __ Mov(x10, entry_hook);
4009#else
4010 // Under the simulator we need to indirect the entry hook through a trampoline
4011 // function at a known address.
4012 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4013 __ Mov(x10, Operand(ExternalReference(&dispatcher,
4014 ExternalReference::BUILTIN_CALL,
4015 isolate())));
4016 // It additionally takes an isolate as a third parameter
4017 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4018#endif
4019
4020 // The caller's return address is above the saved temporaries.
4021 // Grab its location for the second argument to the hook.
4022 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
4023
4024 {
4025 // Create a dummy frame, as CallCFunction requires this.
4026 FrameScope frame(masm, StackFrame::MANUAL);
4027 __ CallCFunction(x10, 2, 0);
4028 }
4029
4030 __ PopCPURegList(kCallerSaved);
4031 __ Ret();
4032}
4033
4034
4035void DirectCEntryStub::Generate(MacroAssembler* masm) {
4036 // When calling into C++ code the stack pointer must be csp.
4037 // Therefore this code must use csp for peek/poke operations when the
4038 // stub is generated. When the stub is called
4039 // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
4040 // and configure the stack pointer *before* doing the call.
4041 const Register old_stack_pointer = __ StackPointer();
4042 __ SetStackPointer(csp);
4043
4044 // Put return address on the stack (accessible to GC through exit frame pc).
4045 __ Poke(lr, 0);
4046 // Call the C++ function.
4047 __ Blr(x10);
4048 // Return to calling code.
4049 __ Peek(lr, 0);
4050 __ AssertFPCRState();
4051 __ Ret();
4052
4053 __ SetStackPointer(old_stack_pointer);
4054}
4055
4056void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4057 Register target) {
4058 // Make sure the caller configured the stack pointer (see comment in
4059 // DirectCEntryStub::Generate).
4060 DCHECK(csp.Is(__ StackPointer()));
4061
4062 intptr_t code =
4063 reinterpret_cast<intptr_t>(GetCode().location());
4064 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4065 __ Mov(x10, target);
4066 // Branch to the stub.
4067 __ Blr(lr);
4068}
4069
4070
4071// Probe the name dictionary in the 'elements' register.
4072// Jump to the 'done' label if a property with the given name is found.
4073// Jump to the 'miss' label otherwise.
4074//
4075 // If the lookup was successful, 'scratch2' will be equal to elements + 8 * index.
4076// 'elements' and 'name' registers are preserved on miss.
4077void NameDictionaryLookupStub::GeneratePositiveLookup(
4078 MacroAssembler* masm,
4079 Label* miss,
4080 Label* done,
4081 Register elements,
4082 Register name,
4083 Register scratch1,
4084 Register scratch2) {
4085 DCHECK(!AreAliased(elements, name, scratch1, scratch2));
4086
4087 // Assert that name contains a string.
4088 __ AssertName(name);
4089
4090 // Compute the capacity mask.
4091 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
4092 __ Sub(scratch1, scratch1, 1);
4093
4094 // Generate an unrolled loop that performs a few probes before giving up.
4095 for (int i = 0; i < kInlinedProbes; i++) {
4096 // Compute the masked index: (hash + i + i * i) & mask.
4097 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4098 if (i > 0) {
4099 // Add the probe offset (i + i * i), left-shifted, to avoid right-shifting
4100 // the hash in a separate instruction. The value hash + i + i * i is
4101 // right-shifted in the following And instruction.
4102 DCHECK(NameDictionary::GetProbeOffset(i) <
4103 1 << (32 - Name::kHashFieldOffset));
4104 __ Add(scratch2, scratch2, Operand(
4105 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4106 }
4107 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4108
4109 // Scale the index by multiplying by the element size.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004110 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004111 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
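    // A sketch of the scaling: with kEntrySize == 3, the Add above computes
    //   scratch2 = index + (index << 1) == index * 3,
    // and the 'LSL kPointerSizeLog2' applied below turns that into a byte
    // offset of index * 3 * kPointerSize from 'elements'.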
4112
4113 // Check if the key is identical to the name.
4114 UseScratchRegisterScope temps(masm);
4115 Register scratch3 = temps.AcquireX();
4116 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
4117 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
4118 __ Cmp(name, scratch3);
4119 __ B(eq, done);
4120 }
4121
4122 // The inlined probes didn't find the entry.
4123 // Call the complete stub to scan the whole dictionary.
4124
4125 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4126 spill_list.Combine(lr);
4127 spill_list.Remove(scratch1);
4128 spill_list.Remove(scratch2);
4129
4130 __ PushCPURegList(spill_list);
4131
4132 if (name.is(x0)) {
4133 DCHECK(!elements.is(x1));
4134 __ Mov(x1, name);
4135 __ Mov(x0, elements);
4136 } else {
4137 __ Mov(x0, elements);
4138 __ Mov(x1, name);
4139 }
4140
4141 Label not_found;
4142 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4143 __ CallStub(&stub);
4144 __ Cbz(x0, &not_found);
4145 __ Mov(scratch2, x2); // Move entry index into scratch2.
4146 __ PopCPURegList(spill_list);
4147 __ B(done);
4148
4149 __ Bind(&not_found);
4150 __ PopCPURegList(spill_list);
4151 __ B(miss);
4152}
4153
4154
4155void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4156 Label* miss,
4157 Label* done,
4158 Register receiver,
4159 Register properties,
4160 Handle<Name> name,
4161 Register scratch0) {
4162 DCHECK(!AreAliased(receiver, properties, scratch0));
4163 DCHECK(name->IsUniqueName());
4164 // If the names of the slots in the range from 1 to kProbes - 1 for the hash
4165 // value are not equal to the name and the kProbes-th slot is not used (its
4166 // name is the undefined value), then the hash table is guaranteed not to
4167 // contain the property. This holds even if some slots represent deleted
4168 // properties (their names are the hole value).
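  // The probing below, as a rough C++ sketch (entries[] and the helper names
  // are hypothetical, used only to illustrate the control flow):
  //
  //   for (int i = 0; i < kInlinedProbes; i++) {
  //     int probe = (name->Hash() + NameDictionary::GetProbeOffset(i)) & mask;
  //     Object* key = entries[probe * NameDictionary::kEntrySize];
  //     if (key == undefined) goto done;   // Name cannot be in the table.
  //     if (key == *name) goto miss;       // Name found: negative lookup fails.
  //     // The hole or another unique name: keep probing. A non-unique name
  //     // falls back to the miss path via the instance type check below.
  //   }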
4169 for (int i = 0; i < kInlinedProbes; i++) {
4170 // scratch0 points to properties hash.
4171 // Compute the masked index: (hash + i + i * i) & mask.
4172 Register index = scratch0;
4173 // Capacity is smi 2^n.
4174 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
4175 __ Sub(index, index, 1);
4176 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
4177
4178 // Scale the index by multiplying by the entry size.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004179 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004180 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
4181
4182 Register entity_name = scratch0;
4183 // Having undefined at this place means the name is not contained.
4184 Register tmp = index;
4185 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
4186 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4187
4188 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
4189
4190 // Stop if found the property.
4191 __ Cmp(entity_name, Operand(name));
4192 __ B(eq, miss);
4193
4194 Label good;
4195 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
4196
4197 // Check if the entry name is not a unique name.
4198 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4199 __ Ldrb(entity_name,
4200 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4201 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4202 __ Bind(&good);
4203 }
4204
4205 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4206 spill_list.Combine(lr);
4207 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
4208
4209 __ PushCPURegList(spill_list);
4210
4211 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4212 __ Mov(x1, Operand(name));
4213 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4214 __ CallStub(&stub);
4215 // Move stub return value to scratch0. Note that scratch0 is not included in
4216 // spill_list and won't be clobbered by PopCPURegList.
4217 __ Mov(scratch0, x0);
4218 __ PopCPURegList(spill_list);
4219
4220 __ Cbz(scratch0, done);
4221 __ B(miss);
4222}
4223
4224
4225void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4226 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4227 // we cannot call anything that could cause a GC from this stub.
4228 //
4229 // Arguments are in x0 and x1:
4230 // x0: property dictionary.
4231 // x1: the name of the property we are looking for.
4232 //
4233 // Return value is in x0 and is zero if lookup failed, non-zero otherwise.
4234 // If the lookup is successful, x2 will contain the index of the entry.
4235
4236 Register result = x0;
4237 Register dictionary = x0;
4238 Register key = x1;
4239 Register index = x2;
4240 Register mask = x3;
4241 Register hash = x4;
4242 Register undefined = x5;
4243 Register entry_key = x6;
4244
4245 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4246
4247 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
4248 __ Sub(mask, mask, 1);
4249
4250 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4251 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4252
4253 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4254 // Compute the masked index: (hash + i + i * i) & mask.
4255 // Capacity is smi 2^n.
4256 if (i > 0) {
4257 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4258 // the hash in a separate instruction. The value hash + i + i * i is right
4259 // shifted by the 'And' instruction that follows.
4260 DCHECK(NameDictionary::GetProbeOffset(i) <
4261 1 << (32 - Name::kHashFieldOffset));
4262 __ Add(index, hash,
4263 NameDictionary::GetProbeOffset(i) << Name::kHashShift);
4264 } else {
4265 __ Mov(index, hash);
4266 }
4267 __ And(index, mask, Operand(index, LSR, Name::kHashShift));
4268
4269 // Scale the index by multiplying by the entry size.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004270 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004271 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
4272
4273 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
4274 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4275
4276 // Having undefined at this place means the name is not contained.
4277 __ Cmp(entry_key, undefined);
4278 __ B(eq, &not_in_dictionary);
4279
4280 // Stop if found the property.
4281 __ Cmp(entry_key, key);
4282 __ B(eq, &in_dictionary);
4283
4284 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4285 // Check if the entry name is not a unique name.
4286 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4287 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4288 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4289 }
4290 }
4291
4292 __ Bind(&maybe_in_dictionary);
4293 // If we are doing negative lookup then probing failure should be
4294 // treated as a lookup success. For positive lookup, probing failure
4295 // should be treated as lookup failure.
4296 if (mode() == POSITIVE_LOOKUP) {
4297 __ Mov(result, 0);
4298 __ Ret();
4299 }
4300
4301 __ Bind(&in_dictionary);
4302 __ Mov(result, 1);
4303 __ Ret();
4304
4305 __ Bind(&not_in_dictionary);
4306 __ Mov(result, 0);
4307 __ Ret();
4308}
4309
4310
4311template<class T>
4312static void CreateArrayDispatch(MacroAssembler* masm,
4313 AllocationSiteOverrideMode mode) {
4314 ASM_LOCATION("CreateArrayDispatch");
4315 if (mode == DISABLE_ALLOCATION_SITES) {
4316 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4317 __ TailCallStub(&stub);
4318
4319 } else if (mode == DONT_OVERRIDE) {
4320 Register kind = x3;
4321 int last_index =
4322 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4323 for (int i = 0; i <= last_index; ++i) {
4324 Label next;
4325 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
4326 // TODO(jbramley): Is this the best way to handle this? Can we make the
4327 // tail calls conditional, rather than hopping over each one?
4328 __ CompareAndBranch(kind, candidate_kind, ne, &next);
4329 T stub(masm->isolate(), candidate_kind);
4330 __ TailCallStub(&stub);
4331 __ Bind(&next);
4332 }
4333
4334 // If we reached this point there is a problem.
4335 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4336
4337 } else {
4338 UNREACHABLE();
4339 }
4340}
4341
4342
4343// TODO(jbramley): If this needs to be a special case, make it a proper template
4344// specialization, and not a separate function.
4345static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4346 AllocationSiteOverrideMode mode) {
4347 ASM_LOCATION("CreateArrayDispatchOneArgument");
4348 // x0 - argc
4349 // x1 - constructor?
4350 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4351 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4352 // sp[0] - last argument
4353
4354 Register allocation_site = x2;
4355 Register kind = x3;
4356
4357 Label normal_sequence;
4358 if (mode == DONT_OVERRIDE) {
4359 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4360 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4361 STATIC_ASSERT(FAST_ELEMENTS == 2);
4362 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4363 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4364 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4365
4366 // Is the low bit set? If so, the array is holey.
4367 __ Tbnz(kind, 0, &normal_sequence);
4368 }
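  // The STATIC_ASSERTs above pin down the encoding this relies on: packed
  // kinds are even and their holey counterparts are the next odd value, so
  // (sketch)
  //
  //   bool is_holey(ElementsKind k) { return (k & 1) != 0; }
  //   ElementsKind to_holey(ElementsKind k) { return ElementsKind(k | 1); }
  //
  // which is exactly what the Tbnz test above and the 'Orr(kind, kind, 1)'
  // further down exploit.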
4369
4370 // Look at the last argument.
4371 // TODO(jbramley): What does a 0 argument represent?
4372 __ Peek(x10, 0);
4373 __ Cbz(x10, &normal_sequence);
4374
4375 if (mode == DISABLE_ALLOCATION_SITES) {
4376 ElementsKind initial = GetInitialFastElementsKind();
4377 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4378
4379 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4380 holey_initial,
4381 DISABLE_ALLOCATION_SITES);
4382 __ TailCallStub(&stub_holey);
4383
4384 __ Bind(&normal_sequence);
4385 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4386 initial,
4387 DISABLE_ALLOCATION_SITES);
4388 __ TailCallStub(&stub);
4389 } else if (mode == DONT_OVERRIDE) {
4390 // We are going to create a holey array, but our kind is non-holey.
4391 // Fix kind and retry (only if we have an allocation site in the slot).
4392 __ Orr(kind, kind, 1);
4393
4394 if (FLAG_debug_code) {
4395 __ Ldr(x10, FieldMemOperand(allocation_site, 0));
4396 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
4397 &normal_sequence);
4398 __ Assert(eq, kExpectedAllocationSite);
4399 }
4400
4401 // Save the resulting elements kind in type info. We can't just store 'kind'
4402 // in the AllocationSite::transition_info field because elements kind is
4403 // restricted to a portion of the field; upper bits need to be left alone.
4404 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4405 __ Ldr(x11, FieldMemOperand(allocation_site,
4406 AllocationSite::kTransitionInfoOffset));
4407 __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
4408 __ Str(x11, FieldMemOperand(allocation_site,
4409 AllocationSite::kTransitionInfoOffset));
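    // A sketch of the update above, in untagged terms:
    //
    //   int info = Smi::cast(transition_info)->value();
    //   info += kFastElementsKindPackedToHoley;   // packed (even) -> holey
    //   transition_info = Smi::FromInt(info);
    //
    // A plain add is safe because the stored kind is a packed kind here, so
    // adding the packed->holey delta cannot carry out of the ElementsKindBits
    // portion of the field.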
4410
4411 __ Bind(&normal_sequence);
4412 int last_index =
4413 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4414 for (int i = 0; i <= last_index; ++i) {
4415 Label next;
4416 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
4417 __ CompareAndBranch(kind, candidate_kind, ne, &next);
4418 ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
4419 __ TailCallStub(&stub);
4420 __ Bind(&next);
4421 }
4422
4423 // If we reached this point there is a problem.
4424 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4425 } else {
4426 UNREACHABLE();
4427 }
4428}
4429
4430
4431template<class T>
4432static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4433 int to_index = GetSequenceIndexFromFastElementsKind(
4434 TERMINAL_FAST_ELEMENTS_KIND);
4435 for (int i = 0; i <= to_index; ++i) {
4436 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4437 T stub(isolate, kind);
4438 stub.GetCode();
4439 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4440 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4441 stub1.GetCode();
4442 }
4443 }
4444}
4445
4446
4447void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4448 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4449 isolate);
4450 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4451 isolate);
4452 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4453 isolate);
4454}
4455
4456
4457void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4458 Isolate* isolate) {
4459 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4460 for (int i = 0; i < 2; i++) {
4461 // For internal arrays we only need a few things.
4462 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4463 stubh1.GetCode();
4464 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4465 stubh2.GetCode();
4466 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4467 stubh3.GetCode();
4468 }
4469}
4470
4471
4472void ArrayConstructorStub::GenerateDispatchToArrayStub(
4473 MacroAssembler* masm,
4474 AllocationSiteOverrideMode mode) {
4475 Register argc = x0;
4476 if (argument_count() == ANY) {
4477 Label zero_case, n_case;
4478 __ Cbz(argc, &zero_case);
4479 __ Cmp(argc, 1);
4480 __ B(ne, &n_case);
4481
4482 // One argument.
4483 CreateArrayDispatchOneArgument(masm, mode);
4484
4485 __ Bind(&zero_case);
4486 // No arguments.
4487 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4488
4489 __ Bind(&n_case);
4490 // N arguments.
4491 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4492
4493 } else if (argument_count() == NONE) {
4494 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4495 } else if (argument_count() == ONE) {
4496 CreateArrayDispatchOneArgument(masm, mode);
4497 } else if (argument_count() == MORE_THAN_ONE) {
4498 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4499 } else {
4500 UNREACHABLE();
4501 }
4502}
4503
4504
4505void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4506 ASM_LOCATION("ArrayConstructorStub::Generate");
4507 // ----------- S t a t e -------------
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004508 // -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004509 // -- x1 : constructor
4510 // -- x2 : AllocationSite or undefined
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004511 // -- x3 : new target
4512 // -- sp[0] : last argument
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004513 // -----------------------------------
4514 Register constructor = x1;
4515 Register allocation_site = x2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004516 Register new_target = x3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004517
4518 if (FLAG_debug_code) {
4519 // The array construct code is only set for the global and natives
4520 // builtin Array functions which always have maps.
4521
4522 Label unexpected_map, map_ok;
4523 // Initial map for the builtin Array function should be a map.
4524 __ Ldr(x10, FieldMemOperand(constructor,
4525 JSFunction::kPrototypeOrInitialMapOffset));
4526 // The Smi check will catch both a NULL and a Smi.
4527 __ JumpIfSmi(x10, &unexpected_map);
4528 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
4529 __ Bind(&unexpected_map);
4530 __ Abort(kUnexpectedInitialMapForArrayFunction);
4531 __ Bind(&map_ok);
4532
4533 // We should either have undefined in the allocation_site register or a
4534 // valid AllocationSite.
4535 __ AssertUndefinedOrAllocationSite(allocation_site, x10);
4536 }
4537
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004538 // Enter the context of the Array function.
4539 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
4540
4541 Label subclassing;
4542 __ Cmp(new_target, constructor);
4543 __ B(ne, &subclassing);
4544
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004545 Register kind = x3;
4546 Label no_info;
4547 // Get the elements kind and case on that.
4548 __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
4549
4550 __ Ldrsw(kind,
4551 UntagSmiFieldMemOperand(allocation_site,
4552 AllocationSite::kTransitionInfoOffset));
4553 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
4554 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4555
4556 __ Bind(&no_info);
4557 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004558
4559 // Subclassing support.
4560 __ Bind(&subclassing);
4561 switch (argument_count()) {
4562 case ANY:
4563 case MORE_THAN_ONE:
4564 __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
4565 __ Add(x0, x0, Operand(3));
4566 break;
4567 case NONE:
4568 __ Poke(constructor, 0 * kPointerSize);
4569 __ Mov(x0, Operand(3));
4570 break;
4571 case ONE:
4572 __ Poke(constructor, 1 * kPointerSize);
4573 __ Mov(x0, Operand(4));
4574 break;
4575 }
4576 __ Push(new_target, allocation_site);
4577 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004578}
4579
4580
4581void InternalArrayConstructorStub::GenerateCase(
4582 MacroAssembler* masm, ElementsKind kind) {
4583 Label zero_case, n_case;
4584 Register argc = x0;
4585
4586 __ Cbz(argc, &zero_case);
4587 __ CompareAndBranch(argc, 1, ne, &n_case);
4588
4589 // One argument.
4590 if (IsFastPackedElementsKind(kind)) {
4591 Label packed_case;
4592
4593 // We might need to create a holey array; look at the first argument.
4594 __ Peek(x10, 0);
4595 __ Cbz(x10, &packed_case);
4596
4597 InternalArraySingleArgumentConstructorStub
4598 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4599 __ TailCallStub(&stub1_holey);
4600
4601 __ Bind(&packed_case);
4602 }
4603 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4604 __ TailCallStub(&stub1);
4605
4606 __ Bind(&zero_case);
4607 // No arguments.
4608 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4609 __ TailCallStub(&stub0);
4610
4611 __ Bind(&n_case);
4612 // N arguments.
4613 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4614 __ TailCallStub(&stubN);
4615}
4616
4617
4618void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4619 // ----------- S t a t e -------------
4620 // -- x0 : argc
4621 // -- x1 : constructor
4622 // -- sp[0] : return address
4623 // -- sp[4] : last argument
4624 // -----------------------------------
4625
4626 Register constructor = x1;
4627
4628 if (FLAG_debug_code) {
4629 // The array construct code is only set for the global and natives
4630 // builtin Array functions which always have maps.
4631
4632 Label unexpected_map, map_ok;
4633 // Initial map for the builtin Array function should be a map.
4634 __ Ldr(x10, FieldMemOperand(constructor,
4635 JSFunction::kPrototypeOrInitialMapOffset));
4636 // The Smi check will catch both a NULL and a Smi.
4637 __ JumpIfSmi(x10, &unexpected_map);
4638 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
4639 __ Bind(&unexpected_map);
4640 __ Abort(kUnexpectedInitialMapForArrayFunction);
4641 __ Bind(&map_ok);
4642 }
4643
4644 Register kind = w3;
4645 // Figure out the right elements kind.
4646 __ Ldr(x10, FieldMemOperand(constructor,
4647 JSFunction::kPrototypeOrInitialMapOffset));
4648
4649 // Retrieve elements_kind from map.
4650 __ LoadElementsKindFromMap(kind, x10);
4651
4652 if (FLAG_debug_code) {
4653 Label done;
4654 __ Cmp(x3, FAST_ELEMENTS);
4655 __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
4656 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4657 }
4658
4659 Label fast_elements_case;
4660 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
4661 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4662
4663 __ Bind(&fast_elements_case);
4664 GenerateCase(masm, FAST_ELEMENTS);
4665}
4666
4667
Ben Murdoch097c5b22016-05-18 11:27:45 +01004668void FastNewObjectStub::Generate(MacroAssembler* masm) {
4669 // ----------- S t a t e -------------
4670 // -- x1 : target
4671 // -- x3 : new target
4672 // -- cp : context
4673 // -- lr : return address
4674 // -----------------------------------
4675 __ AssertFunction(x1);
4676 __ AssertReceiver(x3);
4677
4678 // Verify that the new target is a JSFunction.
4679 Label new_object;
4680 __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
4681
4682 // Load the initial map and verify that it's in fact a map.
4683 __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
4684 __ JumpIfSmi(x2, &new_object);
4685 __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
4686
4687 // Fall back to runtime if the target differs from the new target's
4688 // initial map constructor.
4689 __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
4690 __ CompareAndBranch(x0, x1, ne, &new_object);
4691
4692 // Allocate the JSObject on the heap.
4693 Label allocate, done_allocate;
4694 __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
4695 __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
4696 __ Bind(&done_allocate);
4697
4698 // Initialize the JSObject fields.
Ben Murdoch097c5b22016-05-18 11:27:45 +01004699 STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
Ben Murdochc5610432016-08-08 18:44:38 +01004700 __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004701 __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
4702 STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
4703 STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
Ben Murdochc5610432016-08-08 18:44:38 +01004704 __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
4705 __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004706 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
Ben Murdochc5610432016-08-08 18:44:38 +01004707 __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004708
4709 // ----------- S t a t e -------------
Ben Murdochc5610432016-08-08 18:44:38 +01004710 // -- x0 : result (tagged)
Ben Murdoch097c5b22016-05-18 11:27:45 +01004711 // -- x1 : result fields (untagged)
4712 // -- x5 : result end (untagged)
4713 // -- x2 : initial map
4714 // -- cp : context
4715 // -- lr : return address
4716 // -----------------------------------
4717
4718 // Perform in-object slack tracking if requested.
4719 Label slack_tracking;
4720 STATIC_ASSERT(Map::kNoSlackTracking == 0);
4721 __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
4722 __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
4723 __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
4724 &slack_tracking);
4725 {
4726 // Initialize all in-object fields with undefined.
4727 __ InitializeFieldsWithFiller(x1, x5, x6);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004728 __ Ret();
4729 }
4730 __ Bind(&slack_tracking);
4731 {
4732 // Decrease generous allocation count.
4733 STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
4734 __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
4735 __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
4736
4737 // Initialize the in-object fields with undefined.
4738 __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
4739 __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
4740 __ InitializeFieldsWithFiller(x1, x4, x6);
4741
4742 // Initialize the remaining (reserved) fields with one pointer filler map.
4743 __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
4744 __ InitializeFieldsWithFiller(x1, x5, x6);
4745
Ben Murdoch097c5b22016-05-18 11:27:45 +01004746 // Check if we can finalize the instance size.
4747 Label finalize;
4748 STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
4749 __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
4750 __ Ret();
4751
4752 // Finalize the instance size.
4753 __ Bind(&finalize);
4754 {
4755 FrameScope scope(masm, StackFrame::INTERNAL);
4756 __ Push(x0, x2);
4757 __ CallRuntime(Runtime::kFinalizeInstanceSize);
4758 __ Pop(x0);
4759 }
4760 __ Ret();
4761 }
4762
4763 // Fall back to %AllocateInNewSpace.
4764 __ Bind(&allocate);
4765 {
4766 FrameScope scope(masm, StackFrame::INTERNAL);
4767 STATIC_ASSERT(kSmiTag == 0);
4768 STATIC_ASSERT(kSmiTagSize == 1);
4769 __ Mov(x4,
4770 Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
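    // The single shift above performs two conversions at once (sketch):
    //
    //   size_in_bytes = size_in_words << kPointerSizeLog2;
    //   smi_size      = size_in_bytes << (kSmiTagSize + kSmiShiftSize);
    //
    // so x4 ends up holding Smi::FromInt(instance size in bytes) for the
    // runtime call.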
4771 __ Push(x2, x4);
4772 __ CallRuntime(Runtime::kAllocateInNewSpace);
4773 __ Pop(x2);
4774 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01004775 __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
4776 __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
Ben Murdochc5610432016-08-08 18:44:38 +01004777 STATIC_ASSERT(kHeapObjectTag == 1);
4778 __ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
Ben Murdoch097c5b22016-05-18 11:27:45 +01004779 __ B(&done_allocate);
4780
4781 // Fall back to %NewObject.
4782 __ Bind(&new_object);
4783 __ Push(x1, x3);
4784 __ TailCallRuntime(Runtime::kNewObject);
4785}
4786
4787
4788void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
4789 // ----------- S t a t e -------------
4790 // -- x1 : function
4791 // -- cp : context
4792 // -- fp : frame pointer
4793 // -- lr : return address
4794 // -----------------------------------
4795 __ AssertFunction(x1);
4796
Ben Murdochc5610432016-08-08 18:44:38 +01004797 // Make x2 point to the JavaScript frame.
4798 __ Mov(x2, fp);
4799 if (skip_stub_frame()) {
4800 // For Ignition we need to skip the handler/stub frame to reach the
4801 // JavaScript frame for the function.
Ben Murdoch097c5b22016-05-18 11:27:45 +01004802 __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
Ben Murdochc5610432016-08-08 18:44:38 +01004803 }
4804 if (FLAG_debug_code) {
4805 Label ok;
Ben Murdochda12d292016-06-02 14:46:10 +01004806 __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004807 __ Cmp(x3, x1);
Ben Murdochc5610432016-08-08 18:44:38 +01004808 __ B(eq, &ok);
4809 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4810 __ Bind(&ok);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004811 }
4812
4813 // Check if we have rest parameters (only possible if we have an
4814 // arguments adaptor frame below the function frame).
4815 Label no_rest_parameters;
Ben Murdochda12d292016-06-02 14:46:10 +01004816 __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
4817 __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004818 __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
4819 __ B(ne, &no_rest_parameters);
4820
4821 // Check if the arguments adaptor frame contains more arguments than
4822 // specified by the function's internal formal parameter count.
4823 Label rest_parameters;
4824 __ Ldrsw(x0, UntagSmiMemOperand(
4825 x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4826 __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
4827 __ Ldrsw(
4828 x1, FieldMemOperand(x1, SharedFunctionInfo::kFormalParameterCountOffset));
4829 __ Subs(x0, x0, x1);
4830 __ B(gt, &rest_parameters);
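  // In other words (sketch): x0 = actual argument count - formal parameter
  // count; when x0 > 0 those trailing x0 arguments become the rest array,
  // otherwise the rest parameter array is empty.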
4831
4832 // Return an empty rest parameter array.
4833 __ Bind(&no_rest_parameters);
4834 {
4835 // ----------- S t a t e -------------
4836 // -- cp : context
4837 // -- lr : return address
4838 // -----------------------------------
4839
4840 // Allocate an empty rest parameter array.
4841 Label allocate, done_allocate;
Ben Murdochc5610432016-08-08 18:44:38 +01004842 __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004843 __ Bind(&done_allocate);
4844
4845 // Setup the rest parameter array in x0.
4846 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
4847 __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
4848 __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
4849 __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
4850 __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
4851 __ Mov(x1, Smi::FromInt(0));
4852 __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
4853 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4854 __ Ret();
4855
4856 // Fall back to %AllocateInNewSpace.
4857 __ Bind(&allocate);
4858 {
4859 FrameScope scope(masm, StackFrame::INTERNAL);
4860 __ Push(Smi::FromInt(JSArray::kSize));
4861 __ CallRuntime(Runtime::kAllocateInNewSpace);
4862 }
4863 __ B(&done_allocate);
4864 }
4865
4866 __ Bind(&rest_parameters);
4867 {
4868 // Compute the pointer to the first rest parameter (skipping the receiver).
4869 __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
4870 __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
4871
4872 // ----------- S t a t e -------------
4873 // -- cp : context
4874 // -- x0 : number of rest parameters
4875 // -- x2 : pointer to first rest parameters
4876 // -- lr : return address
4877 // -----------------------------------
4878
4879 // Allocate space for the rest parameter array plus the backing store.
4880 Label allocate, done_allocate;
4881 __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize);
4882 __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
Ben Murdochc5610432016-08-08 18:44:38 +01004883 __ Allocate(x1, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004884 __ Bind(&done_allocate);
4885
4886 // Compute arguments.length in x6.
4887 __ SmiTag(x6, x0);
4888
4889 // Setup the elements array in x3.
4890 __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
4891 __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
4892 __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
4893 __ Add(x4, x3, FixedArray::kHeaderSize);
4894 {
4895 Label loop, done_loop;
4896 __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
4897 __ Bind(&loop);
4898 __ Cmp(x4, x0);
4899 __ B(eq, &done_loop);
4900 __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
4901 __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
4902 __ Sub(x2, x2, Operand(1 * kPointerSize));
4903 __ Add(x4, x4, Operand(1 * kPointerSize));
4904 __ B(&loop);
4905 __ Bind(&done_loop);
4906 }
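    // A rough equivalent of the loop above (sketch):
    //
    //   while (dst != dst_end) { *dst++ = *src--; }
    //
    // where 'src' starts at the first rest parameter on the stack and 'dst'
    // at the first element slot of the freshly allocated FixedArray.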
4907
4908 // Setup the rest parameter array in x0.
4909 __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
4910 __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
4911 __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
4912 __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
4913 __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
4914 __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
4915 STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4916 __ Ret();
4917
4918 // Fall back to %AllocateInNewSpace.
4919 __ Bind(&allocate);
4920 {
4921 FrameScope scope(masm, StackFrame::INTERNAL);
4922 __ SmiTag(x0);
4923 __ SmiTag(x1);
4924 __ Push(x0, x2, x1);
4925 __ CallRuntime(Runtime::kAllocateInNewSpace);
4926 __ Mov(x3, x0);
4927 __ Pop(x2, x0);
4928 __ SmiUntag(x0);
4929 }
4930 __ B(&done_allocate);
4931 }
4932}
4933
4934
4935void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
4936 // ----------- S t a t e -------------
4937 // -- x1 : function
4938 // -- cp : context
4939 // -- fp : frame pointer
4940 // -- lr : return address
4941 // -----------------------------------
4942 __ AssertFunction(x1);
4943
Ben Murdochc5610432016-08-08 18:44:38 +01004944 // Make x6 point to the JavaScript frame.
4945 __ Mov(x6, fp);
4946 if (skip_stub_frame()) {
4947 // For Ignition we need to skip the handler/stub frame to reach the
4948 // JavaScript frame for the function.
4949 __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
4950 }
4951 if (FLAG_debug_code) {
4952 Label ok;
4953 __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
4954 __ Cmp(x3, x1);
4955 __ B(eq, &ok);
4956 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4957 __ Bind(&ok);
4958 }
4959
Ben Murdoch097c5b22016-05-18 11:27:45 +01004960 // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
4961 __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
4962 __ Ldrsw(
4963 x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochc5610432016-08-08 18:44:38 +01004964 __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004965 __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
4966 __ SmiTag(x2);
4967
4968 // x1 : function
4969 // x2 : number of parameters (tagged)
4970 // x3 : parameters pointer
Ben Murdochc5610432016-08-08 18:44:38 +01004971 // x6 : JavaScript frame pointer
Ben Murdoch097c5b22016-05-18 11:27:45 +01004972 //
4973 // Returns pointer to result object in x0.
4974
4975 // Make an untagged copy of the parameter count.
4976 // Note: arg_count_smi is an alias of param_count_smi.
4977 Register function = x1;
4978 Register arg_count_smi = x2;
4979 Register param_count_smi = x2;
4980 Register recv_arg = x3;
4981 Register param_count = x7;
4982 __ SmiUntag(param_count, param_count_smi);
4983
4984 // Check if the calling frame is an arguments adaptor frame.
4985 Register caller_fp = x11;
4986 Register caller_ctx = x12;
4987 Label runtime;
4988 Label adaptor_frame, try_allocate;
Ben Murdochc5610432016-08-08 18:44:38 +01004989 __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
Ben Murdochda12d292016-06-02 14:46:10 +01004990 __ Ldr(
4991 caller_ctx,
4992 MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01004993 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
4994 __ B(eq, &adaptor_frame);
4995
4996 // No adaptor, parameter count = argument count.
4997
4998 // x1 function function pointer
4999 // x2 arg_count_smi number of function arguments (smi)
5000 // x3 recv_arg pointer to receiver arguments
5001 // x4 mapped_params number of mapped params, min(params, args) (uninit)
5002 // x7 param_count number of function parameters
5003 // x11 caller_fp caller's frame pointer
5004 // x14 arg_count number of function arguments (uninit)
5005
5006 Register arg_count = x14;
5007 Register mapped_params = x4;
5008 __ Mov(arg_count, param_count);
5009 __ Mov(mapped_params, param_count);
5010 __ B(&try_allocate);
5011
5012 // We have an adaptor frame. Patch the parameters pointer.
5013 __ Bind(&adaptor_frame);
5014 __ Ldr(arg_count_smi,
5015 MemOperand(caller_fp,
5016 ArgumentsAdaptorFrameConstants::kLengthOffset));
5017 __ SmiUntag(arg_count, arg_count_smi);
5018 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
5019 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
5020
5021 // Compute the mapped parameter count = min(param_count, arg_count)
5022 __ Cmp(param_count, arg_count);
5023 __ Csel(mapped_params, param_count, arg_count, lt);
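  // Equivalent C++ (sketch):
  //   mapped_params = (param_count < arg_count) ? param_count : arg_count;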
5024
5025 __ Bind(&try_allocate);
5026
5027 // x0 alloc_obj pointer to allocated objects: param map, backing
5028 // store, arguments (uninit)
5029 // x1 function function pointer
5030 // x2 arg_count_smi number of function arguments (smi)
5031 // x3 recv_arg pointer to receiver arguments
5032 // x4 mapped_params number of mapped parameters, min(params, args)
5033 // x7 param_count number of function parameters
5034 // x10 size size of objects to allocate (uninit)
5035 // x14 arg_count number of function arguments
5036
5037 // Compute the size of backing store, parameter map, and arguments object.
5038 // 1. The parameter map, which has two extra words containing the context
5039 // and the backing store.
5040 const int kParameterMapHeaderSize =
5041 FixedArray::kHeaderSize + 2 * kPointerSize;
5042
5043 // Calculate the parameter map size, assuming it exists.
5044 Register size = x10;
5045 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
5046 __ Add(size, size, kParameterMapHeaderSize);
5047
5048 // If there are no mapped parameters, set the running size total to zero.
5049 // Otherwise, use the parameter map size calculated earlier.
5050 __ Cmp(mapped_params, 0);
5051 __ CzeroX(size, eq);
5052
5053 // 2. Add the size of the backing store and arguments object.
5054 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
5055 __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
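  // Putting the three contributions together (sketch):
  //
  //   size = (mapped_params > 0
  //               ? kParameterMapHeaderSize + mapped_params * kPointerSize
  //               : 0)
  //        + FixedArray::kHeaderSize + arg_count * kPointerSize  // backing store
  //        + JSSloppyArgumentsObject::kSize;                     // JS object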
5056
5057 // Do the allocation of all three objects in one go. Assign this to x0, as it
5058 // will be returned to the caller.
5059 Register alloc_obj = x0;
Ben Murdochc5610432016-08-08 18:44:38 +01005060 __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005061
5062 // Get the arguments boilerplate from the current (global) context.
5063
5064 // x0 alloc_obj pointer to allocated objects (param map, backing
5065 // store, arguments)
5066 // x1 function function pointer
5067 // x2 arg_count_smi number of function arguments (smi)
5068 // x3 recv_arg pointer to receiver arguments
5069 // x4 mapped_params number of mapped parameters, min(params, args)
5070 // x7 param_count number of function parameters
5071 // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
5072 // x14 arg_count number of function arguments
5073
5074 Register global_ctx = x10;
5075 Register sloppy_args_map = x11;
5076 Register aliased_args_map = x10;
5077 __ Ldr(global_ctx, NativeContextMemOperand());
5078
5079 __ Ldr(sloppy_args_map,
5080 ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
5081 __ Ldr(
5082 aliased_args_map,
5083 ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
5084 __ Cmp(mapped_params, 0);
5085 __ CmovX(sloppy_args_map, aliased_args_map, ne);
5086
5087 // Copy the JS object part.
5088 __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
5089 __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
5090 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
5091 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
5092
5093 // Set up the callee in-object property.
5094 __ AssertNotSmi(function);
5095 __ Str(function,
5096 FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
5097
5098 // Use the length and set that as an in-object property.
5099 __ Str(arg_count_smi,
5100 FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
5101
5102 // Set up the elements pointer in the allocated arguments object.
5103 // If we allocated a parameter map, "elements" will point there, otherwise
5104 // it will point to the backing store.
5105
5106 // x0 alloc_obj pointer to allocated objects (param map, backing
5107 // store, arguments)
5108 // x1 function function pointer
5109 // x2 arg_count_smi number of function arguments (smi)
5110 // x3 recv_arg pointer to receiver arguments
5111 // x4 mapped_params number of mapped parameters, min(params, args)
5112 // x5 elements pointer to parameter map or backing store (uninit)
5113 // x6 backing_store pointer to backing store (uninit)
5114 // x7 param_count number of function parameters
5115 // x14 arg_count number of function arguments
5116
5117 Register elements = x5;
5118 __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
5119 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
5120
5121 // Initialize parameter map. If there are no mapped arguments, we're done.
5122 Label skip_parameter_map;
5123 __ Cmp(mapped_params, 0);
5124 // Set up backing store address, because it is needed later for filling in
5125 // the unmapped arguments.
5126 Register backing_store = x6;
5127 __ CmovX(backing_store, elements, eq);
5128 __ B(eq, &skip_parameter_map);
5129
5130 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
5131 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
5132 __ Add(x10, mapped_params, 2);
5133 __ SmiTag(x10);
5134 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
5135 __ Str(cp, FieldMemOperand(elements,
5136 FixedArray::kHeaderSize + 0 * kPointerSize));
5137 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
5138 __ Add(x10, x10, kParameterMapHeaderSize);
5139 __ Str(x10, FieldMemOperand(elements,
5140 FixedArray::kHeaderSize + 1 * kPointerSize));
5141
5142 // Copy the parameter slots and the holes in the arguments.
5143 // We need to fill in mapped_parameter_count slots. They index the context,
5144 // where parameters are stored in reverse order, at:
5145 //
5146 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
5147 //
5148 // The mapped parameter thus needs to get indices:
5149 //
5150 // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
5151 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
5152 //
5153 // We loop from right to left.
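  // A small worked example (sketch): with param_count == 4 and
  // mapped_params == 2, the loop below writes the context indices
  //   MIN_CONTEXT_SLOTS + 2 and MIN_CONTEXT_SLOTS + 3
  // into the parameter map, and stores the hole into the corresponding
  // backing store slots so that lookups fall through to the context.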
5154
5155 // x0 alloc_obj pointer to allocated objects (param map, backing
5156 // store, arguments)
5157 // x1 function function pointer
5158 // x2 arg_count_smi number of function arguments (smi)
5159 // x3 recv_arg pointer to receiver arguments
5160 // x4 mapped_params number of mapped parameters, min(params, args)
5161 // x5 elements pointer to parameter map or backing store (uninit)
5162 // x6 backing_store pointer to backing store (uninit)
5163 // x7 param_count number of function parameters
5164 // x11 loop_count parameter loop counter (uninit)
5165 // x12 index parameter index (smi, uninit)
5166 // x13 the_hole hole value (uninit)
5167 // x14 arg_count number of function arguments
5168
5169 Register loop_count = x11;
5170 Register index = x12;
5171 Register the_hole = x13;
5172 Label parameters_loop, parameters_test;
5173 __ Mov(loop_count, mapped_params);
5174 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
5175 __ Sub(index, index, mapped_params);
5176 __ SmiTag(index);
5177 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
5178 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
5179 __ Add(backing_store, backing_store, kParameterMapHeaderSize);
5180
5181 __ B(&parameters_test);
5182
5183 __ Bind(&parameters_loop);
5184 __ Sub(loop_count, loop_count, 1);
5185 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
5186 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
5187 __ Str(index, MemOperand(elements, x10));
5188 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
5189 __ Str(the_hole, MemOperand(backing_store, x10));
5190 __ Add(index, index, Smi::FromInt(1));
5191 __ Bind(&parameters_test);
5192 __ Cbnz(loop_count, &parameters_loop);
5193
5194 __ Bind(&skip_parameter_map);
5195 // Copy the arguments header and the remaining slots (if there are any).
5196 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
5197 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
5198 __ Str(arg_count_smi, FieldMemOperand(backing_store,
5199 FixedArray::kLengthOffset));
5200
5201 // x0 alloc_obj pointer to allocated objects (param map, backing
5202 // store, arguments)
5203 // x1 function function pointer
5204 // x2 arg_count_smi number of function arguments (smi)
5205 // x3 recv_arg pointer to receiver arguments
5206 // x4 mapped_params number of mapped parameters, min(params, args)
5207 // x6 backing_store pointer to backing store (uninit)
5208 // x14 arg_count number of function arguments
5209
5210 Label arguments_loop, arguments_test;
5211 __ Mov(x10, mapped_params);
5212 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
5213 __ B(&arguments_test);
5214
5215 __ Bind(&arguments_loop);
5216 __ Sub(recv_arg, recv_arg, kPointerSize);
5217 __ Ldr(x11, MemOperand(recv_arg));
5218 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
5219 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
5220 __ Add(x10, x10, 1);
5221
5222 __ Bind(&arguments_test);
5223 __ Cmp(x10, arg_count);
5224 __ B(lt, &arguments_loop);
5225
5226 __ Ret();
5227
5228 // Do the runtime call to allocate the arguments object.
5229 __ Bind(&runtime);
5230 __ Push(function, recv_arg, arg_count_smi);
5231 __ TailCallRuntime(Runtime::kNewSloppyArguments);
5232}
5233
5234
5235void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
5236 // ----------- S t a t e -------------
5237 // -- x1 : function
5238 // -- cp : context
5239 // -- fp : frame pointer
5240 // -- lr : return address
5241 // -----------------------------------
5242 __ AssertFunction(x1);
5243
Ben Murdochc5610432016-08-08 18:44:38 +01005244 // Make x2 point to the JavaScript frame.
5245 __ Mov(x2, fp);
5246 if (skip_stub_frame()) {
5247 // For Ignition we need to skip the handler/stub frame to reach the
5248 // JavaScript frame for the function.
Ben Murdoch097c5b22016-05-18 11:27:45 +01005249 __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
Ben Murdochc5610432016-08-08 18:44:38 +01005250 }
5251 if (FLAG_debug_code) {
5252 Label ok;
Ben Murdochda12d292016-06-02 14:46:10 +01005253 __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01005254 __ Cmp(x3, x1);
Ben Murdochc5610432016-08-08 18:44:38 +01005255 __ B(eq, &ok);
5256 __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
5257 __ Bind(&ok);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005258 }
5259
5260 // Check if we have an arguments adaptor frame below the function frame.
5261 Label arguments_adaptor, arguments_done;
5262 __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
Ben Murdochda12d292016-06-02 14:46:10 +01005263 __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
Ben Murdoch097c5b22016-05-18 11:27:45 +01005264 __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
5265 __ B(eq, &arguments_adaptor);
5266 {
5267 __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
5268 __ Ldrsw(x0, FieldMemOperand(
5269 x1, SharedFunctionInfo::kFormalParameterCountOffset));
5270 __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
5271 __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
5272 }
5273 __ B(&arguments_done);
5274 __ Bind(&arguments_adaptor);
5275 {
5276 __ Ldrsw(x0, UntagSmiMemOperand(
5277 x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
5278 __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
5279 __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
5280 }
5281 __ Bind(&arguments_done);
5282
5283 // ----------- S t a t e -------------
5284 // -- cp : context
5285 // -- x0 : number of rest parameters
5286 // -- x2 : pointer to first rest parameters
5287 // -- lr : return address
5288 // -----------------------------------
5289
5290 // Allocate space for the strict arguments object plus the backing store.
5291 Label allocate, done_allocate;
5292 __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
5293 __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
Ben Murdochc5610432016-08-08 18:44:38 +01005294 __ Allocate(x1, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005295 __ Bind(&done_allocate);
5296
5297 // Compute arguments.length in x6.
5298 __ SmiTag(x6, x0);
5299
5300 // Setup the elements array in x3.
5301 __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
5302 __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
5303 __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
5304 __ Add(x4, x3, FixedArray::kHeaderSize);
5305 {
5306 Label loop, done_loop;
5307 __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
5308 __ Bind(&loop);
5309 __ Cmp(x4, x0);
5310 __ B(eq, &done_loop);
5311 __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
5312 __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
5313 __ Sub(x2, x2, Operand(1 * kPointerSize));
5314 __ Add(x4, x4, Operand(1 * kPointerSize));
5315 __ B(&loop);
5316 __ Bind(&done_loop);
5317 }
5318
5319 // Setup the strict arguments object in x0.
5320 __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
5321 __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
5322 __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
5323 __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
5324 __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
5325 __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
5326 STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
5327 __ Ret();
5328
5329 // Fall back to %AllocateInNewSpace.
5330 __ Bind(&allocate);
5331 {
5332 FrameScope scope(masm, StackFrame::INTERNAL);
5333 __ SmiTag(x0);
5334 __ SmiTag(x1);
5335 __ Push(x0, x2, x1);
5336 __ CallRuntime(Runtime::kAllocateInNewSpace);
5337 __ Mov(x3, x0);
5338 __ Pop(x2, x0);
5339 __ SmiUntag(x0);
5340 }
5341 __ B(&done_allocate);
5342}
5343
5344
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005345void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
5346 Register context = cp;
5347 Register result = x0;
5348 Register slot = x2;
5349 Label slow_case;
5350
5351 // Go up the context chain to the script context.
5352 for (int i = 0; i < depth(); ++i) {
5353 __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
5354 context = result;
5355 }
5356
5357 // Load the PropertyCell value at the specified slot.
5358 __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
5359 __ Ldr(result, ContextMemOperand(result));
5360 __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
5361
5362 // If the result is not the_hole, return. Otherwise, handle in the runtime.
5363 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
5364 __ Ret();
5365
5366 // Fallback to runtime.
5367 __ Bind(&slow_case);
5368 __ SmiTag(slot);
5369 __ Push(slot);
5370 __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
5371}
5372
5373
5374void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
5375 Register context = cp;
5376 Register value = x0;
5377 Register slot = x2;
5378 Register context_temp = x10;
5379 Register cell = x10;
5380 Register cell_details = x11;
5381 Register cell_value = x12;
5382 Register cell_value_map = x13;
5383 Register value_map = x14;
5384 Label fast_heapobject_case, fast_smi_case, slow_case;
5385
5386 if (FLAG_debug_code) {
5387 __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
5388 __ Check(ne, kUnexpectedValue);
5389 }
5390
5391 // Go up the context chain to the script context.
5392 for (int i = 0; i < depth(); i++) {
5393 __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
5394 context = context_temp;
5395 }
5396
5397 // Load the PropertyCell at the specified slot.
5398 __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
5399 __ Ldr(cell, ContextMemOperand(cell));
5400
5401 // Load PropertyDetails for the cell (actually only the cell_type and kind).
5402 __ Ldr(cell_details,
5403 UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
5404 __ And(cell_details, cell_details,
5405 PropertyDetails::PropertyCellTypeField::kMask |
5406 PropertyDetails::KindField::kMask |
5407 PropertyDetails::kAttributesReadOnlyMask);
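  // Sketch: cell_details = details & (cell type | kind | read-only bit), so
  // each Cmp below can match an exact (type, kind, !read_only) combination
  // with a single comparison.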
5408
5409 // Check if PropertyCell holds mutable data.
5410 Label not_mutable_data;
5411 __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
5412 PropertyCellType::kMutable) |
5413 PropertyDetails::KindField::encode(kData));
5414 __ B(ne, &not_mutable_data);
5415 __ JumpIfSmi(value, &fast_smi_case);
5416 __ Bind(&fast_heapobject_case);
5417 __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5418 // RecordWriteField clobbers the value register, so we copy it before the
5419 // call.
5420 __ Mov(x11, value);
5421 __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
5422 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
5423 OMIT_SMI_CHECK);
5424 __ Ret();
5425
5426 __ Bind(&not_mutable_data);
5427 // Check if PropertyCell value matches the new value (relevant for Constant,
5428 // ConstantType and Undefined cells).
5429 Label not_same_value;
5430 __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5431 __ Cmp(cell_value, value);
5432 __ B(ne, &not_same_value);
5433
5434 // Make sure the PropertyCell is not marked READ_ONLY.
5435 __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
5436 __ B(ne, &slow_case);
5437
5438 if (FLAG_debug_code) {
5439 Label done;
5440 // This can only be true for Constant, ConstantType and Undefined cells,
5441 // because we never store the_hole via this stub.
5442 __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
5443 PropertyCellType::kConstant) |
5444 PropertyDetails::KindField::encode(kData));
5445 __ B(eq, &done);
5446 __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
5447 PropertyCellType::kConstantType) |
5448 PropertyDetails::KindField::encode(kData));
5449 __ B(eq, &done);
5450 __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
5451 PropertyCellType::kUndefined) |
5452 PropertyDetails::KindField::encode(kData));
5453 __ Check(eq, kUnexpectedValue);
5454 __ Bind(&done);
5455 }
5456 __ Ret();
5457 __ Bind(&not_same_value);
5458
5459 // Check if PropertyCell contains data with constant type (and is not
5460 // READ_ONLY).
5461 __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
5462 PropertyCellType::kConstantType) |
5463 PropertyDetails::KindField::encode(kData));
5464 __ B(ne, &slow_case);
5465
5466 // Now either both old and new values must be smis or both must be heap
5467 // objects with same map.
5468 Label value_is_heap_object;
5469 __ JumpIfNotSmi(value, &value_is_heap_object);
5470 __ JumpIfNotSmi(cell_value, &slow_case);
5471 // Old and new values are smis, no need for a write barrier here.
5472 __ Bind(&fast_smi_case);
5473 __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5474 __ Ret();
5475
5476 __ Bind(&value_is_heap_object);
5477 __ JumpIfSmi(cell_value, &slow_case);
5478
5479 __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
5480 __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
5481 __ Cmp(cell_value_map, value_map);
5482 __ B(eq, &fast_heapobject_case);
5483
5484 // Fall back to the runtime.
5485 __ Bind(&slow_case);
5486 __ SmiTag(slot);
5487 __ Push(slot, value);
5488 __ TailCallRuntime(is_strict(language_mode())
5489 ? Runtime::kStoreGlobalViaContext_Strict
5490 : Runtime::kStoreGlobalViaContext_Sloppy);
5491}
5492
5493
5494 // The number of registers that CallApiFunctionAndReturn will need to save on
5495 // the stack. The space for these registers needs to be allocated in the
5496 // ExitFrame before calling CallApiFunctionAndReturn.
5497static const int kCallApiFunctionSpillSpace = 4;
5498
5499
5500static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5501 return static_cast<int>(ref0.address() - ref1.address());
5502}
5503
5504
5505// Calls an API function. Allocates HandleScope, extracts returned value
5506// from handle and propagates exceptions.
5507// 'stack_space' is the space to be unwound on exit (includes the call JS
5508// arguments space and the additional space allocated for the fast call).
5509// 'spill_offset' is the offset from the stack pointer where
5510// CallApiFunctionAndReturn can spill registers.
5511static void CallApiFunctionAndReturn(
5512 MacroAssembler* masm, Register function_address,
5513 ExternalReference thunk_ref, int stack_space,
5514 MemOperand* stack_space_operand, int spill_offset,
5515 MemOperand return_value_operand, MemOperand* context_restore_operand) {
5516 ASM_LOCATION("CallApiFunctionAndReturn");
5517 Isolate* isolate = masm->isolate();
5518 ExternalReference next_address =
5519 ExternalReference::handle_scope_next_address(isolate);
5520 const int kNextOffset = 0;
5521 const int kLimitOffset = AddressOffset(
5522 ExternalReference::handle_scope_limit_address(isolate), next_address);
5523 const int kLevelOffset = AddressOffset(
5524 ExternalReference::handle_scope_level_address(isolate), next_address);
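  // The offsets above address fields of the isolate's handle scope data
  // relative to 'next'. A hypothetical mirror of the assumed layout:
  //
  //   struct HandleScopeData {
  //     Object** next;   // at kNextOffset (== 0)
  //     Object** limit;  // at kLimitOffset
  //     int level;       // at kLevelOffset
  //   };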
5525
5526 DCHECK(function_address.is(x1) || function_address.is(x2));
5527
5528 Label profiler_disabled;
5529 Label end_profiler_check;
5530 __ Mov(x10, ExternalReference::is_profiling_address(isolate));
5531 __ Ldrb(w10, MemOperand(x10));
5532 __ Cbz(w10, &profiler_disabled);
5533 __ Mov(x3, thunk_ref);
5534 __ B(&end_profiler_check);
5535
5536 __ Bind(&profiler_disabled);
5537 __ Mov(x3, function_address);
5538 __ Bind(&end_profiler_check);

  // Save the callee-save registers we are going to use.
  // TODO(all): Is this necessary? ARM doesn't do it.
  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
  __ Poke(x19, (spill_offset + 0) * kXRegSize);
  __ Poke(x20, (spill_offset + 1) * kXRegSize);
  __ Poke(x21, (spill_offset + 2) * kXRegSize);
  __ Poke(x22, (spill_offset + 3) * kXRegSize);

  // Allocate HandleScope in callee-save registers.
  // We will need to restore the HandleScope after the call to the API
  // function; by allocating it in callee-save registers it will be preserved
  // by C code.
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  __ Mov(handle_scope_base, next_address);
  __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  __ Add(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ Mov(x0, ExternalReference::isolate_address(isolate));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // The native call returns to the DirectCEntry stub, which redirects to the
  // return address pushed on the stack (which could have moved after a GC).
  // The DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ Mov(x0, ExternalReference::isolate_address(isolate));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ Ldr(x0, return_value_operand);
  __ Bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (__ emit_debug_code()) {
    __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    __ Cmp(w1, level_reg);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ Sub(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  __ Cmp(limit_reg, x1);
  __ B(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ Bind(&leave_exit_frame);
  // Restore callee-saved registers.
  __ Peek(x19, (spill_offset + 0) * kXRegSize);
  __ Peek(x20, (spill_offset + 1) * kXRegSize);
  __ Peek(x21, (spill_offset + 2) * kXRegSize);
  __ Peek(x22, (spill_offset + 3) * kXRegSize);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ Ldr(cp, *context_restore_operand);
  }

  if (stack_space_operand != NULL) {
    __ Ldr(w2, *stack_space_operand);
  }

  __ LeaveExitFrame(false, x1, !restore_context);

5628 // Check if the function scheduled an exception.
5629 __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
5630 __ Ldr(x5, MemOperand(x5));
5631 __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
5632 &promote_scheduled_exception);
5633
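  // Unwind the stack arguments: either by the dynamic amount loaded into w2
  // from stack_space_operand above, or by the statically known 'stack_space'.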
  if (stack_space_operand != NULL) {
    __ Drop(x2, 1);
  } else {
    __ Drop(stack_space);
  }
  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ Bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ Bind(&delete_allocated_handles);
  __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  // Save the return value in a callee-save register.
  Register saved_result = x19;
  __ Mov(saved_result, x0);
  __ Mov(x0, ExternalReference::isolate_address(isolate));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ Mov(x0, saved_result);
  __ B(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0                  : callee
  //  -- x4                  : call_data
  //  -- x2                  : holder
  //  -- x1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8]  : first argument
  //  -- sp[argc * 8]        : receiver
  // -----------------------------------

  Register callee = x0;
  Register call_data = x4;
  Register holder = x2;
  Register api_function_address = x1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);

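  // After the pushes below, the FunctionCallbackArguments array is laid out
  // with the holder at sp[0] and the new target at sp[7 * kPointerSize],
  // matching the indices asserted above.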
  // Build the FunctionCallbackArguments array on the stack.

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context, callee and call data.
  __ Push(context, callee, call_data);

  if (!is_lazy()) {
    // Load the context from the callee.
    __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

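  // The call_data register is reused below to hold the undefined value pushed
  // for the return value slots; when the call data is statically known to be
  // undefined the register is expected to already hold it, so the load is
  // skipped.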
  if (!call_data_undefined()) {
    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
  }
  Register isolate_reg = x5;
  __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));

  // FunctionCallbackArguments:
  //    return value, return value default, isolate, holder.
  __ Push(call_data, call_data, isolate_reg, holder);

  // Prepare arguments.
  Register args = x6;
  __ Mov(args, masm->StackPointer());

  // Allocate the v8::FunctionCallbackInfo structure on the stack, since it is
  // not controlled by the GC.
  const int kApiStackSpace = 3;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  DCHECK(!AreAliased(x0, api_function_address));
  // x0 = FunctionCallbackInfo&
  // The arguments structure is after the return address.
  __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
  // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
  __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ Mov(x10, argc());
  __ Str(x10, MemOperand(x0, 2 * kPointerSize));
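  // The FunctionCallbackInfo is now initialized: implicit_args_ points at the
  // array built above, values_ at the first JS argument, and length_ holds
  // argc.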

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  // The stack space to unwind is statically known here, so it is passed
  // directly rather than through a MemOperand.
  int stack_space = argc() + FCA::kArgsLength + 1;
  MemOperand* stack_space_operand = NULL;

  const int spill_offset = 1 + kApiStackSpace;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, spill_offset,
                           return_value_operand, &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make the GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = x4;
  Register scratch2 = x5;
  Register scratch3 = x6;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  __ Push(receiver);

  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
  __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ Push(scratch3, scratch, scratch, scratch2, holder);
  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ Push(scratch);
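  // The stack now holds the v8::PropertyCallbackInfo::args_ array in the
  // order asserted above, with the property name handle on top.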

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load the address of the v8::PropertyCallbackInfo::args_ array and the
  // name handle.
  __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
  __ Add(x1, x0, 1 * kPointerSize);  // x1 = v8::PCI::args_

  const int kApiStackSpace = 1;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  // Create the v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ Poke(x1, 1 * kPointerSize);
  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
  // x1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

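  // The getter's C entry point is wrapped in a Foreign stored in the
  // AccessorInfo's js_getter field; load it into x2.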
  Register api_function_address = x2;
  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ Ldr(api_function_address,
         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  const int spill_offset = 1 + kApiStackSpace;
  // +3 is to skip the prologue, the return address and the name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, spill_offset,
                           return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64