// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/codegen-arm64.h"

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

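// No specialized sqrt stub is generated on arm64; fsqrt is a single
// instruction, so a custom fast path buys nothing. Returning nullptr
// presumably makes the caller fall back to the portable implementation.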
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  return nullptr;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

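// These hooks bracket runtime calls made from code stubs: BeforeCall enters
// an INTERNAL frame and AfterCall tears it down, while the has_frame flag
// keeps the MacroAssembler's frame bookkeeping consistent.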
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

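// This transition changes only the receiver's map (e.g. FAST_SMI_ELEMENTS to
// FAST_ELEMENTS): the backing store layout is unchanged, so no elements need
// to be copied; the new map is stored and a write barrier recorded.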
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  ASM_LOCATION(
      "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
  DCHECK(!AreAliased(receiver, key, value, target_map));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      x10,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  Label gc_required, only_change_map;
  Register elements = x4;
  Register length = x5;
  Register array_size = x6;
  Register array = x7;

  Register scratch = x6;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array_size, array));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  __ Lsl(array_size, length, kDoubleSizeLog2);
  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
  // Register array is non-tagged heap object.

  // Set the destination FixedDoubleArray's length and map.
  Register map_root = array_size;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));

  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Move(x10, array);
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));

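  // kHoleNanInt64 is the bit pattern of the "hole" NaN that marks missing
  // elements in a FixedDoubleArray; any non-smi source element is written
  // out as this hole below.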
  FPRegister nan_d = d1;
  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));

  Label entry, done;
  __ B(&entry);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ B(&done);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(lr);
  __ B(fail);

  // Iterate over the array, copying and converting smis to doubles. If an
  // element is non-smi, write a hole to the destination.
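  // The untag is speculative: Fcsel keeps the converted double when the Tst
  // confirms a smi and substitutes the hole NaN otherwise, avoiding a branch
  // in the loop body.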
  {
    Label loop;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
    __ Tst(x13, kSmiTagMask);
    __ Fcsel(d0, d0, nan_d, eq);
    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(lr);
  __ Bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  Register elements = x4;
  Register array_size = x6;
  Register array = x7;
  Register length = x5;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array_size, array, length));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Label only_change_map;

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  // TODO(all): These registers may not need to be pushed. Examine
  // RecordWriteStub and check whether it's needed.
  __ Push(target_map, receiver, key, value);
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));
  // Allocate new FixedArray.
  Label gc_required;
  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);

  // Set destination FixedArray's length and map.
  Register map_root = array_size;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  Register the_hole = x14;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ B(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ Cmp(dst_elements, dst_end);
  __ B(lt, &initialization_loop);

  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);

  Register heap_num_map = x15;
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);

  Label entry;
  __ B(&entry);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  __ Pop(lr);
  __ B(fail);

  {
    Label loop, convert_hole;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
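    // Compare against the raw 64-bit pattern of the hole NaN. An integer
    // compare is used because a floating-point compare cannot distinguish
    // the hole from other NaN payloads.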
    __ Cmp(x13, kHoleNanInt64);
    __ B(eq, &convert_hole);

    // Non-hole double, copy value into a heap number.
    Register heap_num = length;
    Register scratch = array_size;
    Register scratch2 = elements;
    __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
                          x13, heap_num_map);
    __ Mov(x13, dst_elements);
    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    __ B(&entry);

    // Replace the-hole NaN with the-hole pointer.
    __ Bind(&convert_hole);
    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(value, key, receiver, target_map);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(lr);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


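// Code aging works by patching a function's frame-setup prologue (the
// "young" sequence) with a jump to an age-dependent stub. Comparing a code
// object's prologue against these reference sequences is what lets the
// helpers below decide how old a piece of code is.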
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  PatchingAssembler patcher(isolate, young_sequence_.start(),
                            young_sequence_.length() / kInstructionSize);
  // The young sequence is the frame setup code for FUNCTION code types. It is
  // generated by FullCodeGenerator::Generate.
  MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);

#ifdef DEBUG
  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
  DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
  PatchingAssembler patcher_old(isolate, old_sequence_.start(), length);
  MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  return MacroAssembler::IsYoungSequence(isolate, sequence);
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    byte* target = sequence + kCodeAgeStubEntryOffset;
    Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  PatchingAssembler patcher(isolate, sequence,
                            kNoCodeAgeSequenceLength / kInstructionSize);
  if (age == kNoAgeCodeAge) {
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
  }
}


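// Loads the character at |index| of |string| into |result|, unwrapping
// sliced and flat cons strings on the way. Anything that would require
// flattening or other heavyweight handling bails out to |call_runtime|.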
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
  // Fetch the instance type of the receiver into result register.
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ Ldr(result.W(),
         UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ Add(index, index, result.W());
  __ B(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ Bind(&cons_string);
  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ Bind(&indirect_string_loaded);
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ Bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ B(&check_encoding);

  // Handle external strings.
  __ Bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ Tst(result, kIsIndirectStringMask);
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
  // can be bound far away in deferred code.
  __ Tst(result, kShortExternalStringMask);
  __ B(ne, call_runtime);
  __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ Bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
  // Two-byte string.
  __ Ldrh(result, MemOperand(string, index, SXTW, 1));
  __ B(&done);
  __ Bind(&one_byte);
  // One-byte string.
  __ Ldrb(result, MemOperand(string, index, SXTW));
  __ Bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64