// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif
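
// Under USE_SIMULATOR the generated buffer cannot be invoked as a host
// function pointer, so the code address is stashed above and calls are
// routed through the simulator's CallFPReturnsDouble helper.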
27
28
29UnaryMathFunction CreateExpFunction() {
30 if (!FLAG_fast_math) return &std::exp;
31 size_t actual_size;
32 byte* buffer =
33 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
34 if (buffer == NULL) return &std::exp;
35 ExternalReference::InitializeMathExpData();
36
37 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
38
39 {
40 DwVfpRegister input = d0;
41 DwVfpRegister result = d1;
42 DwVfpRegister double_scratch1 = d2;
43 DwVfpRegister double_scratch2 = d3;
44 Register temp1 = r4;
45 Register temp2 = r5;
46 Register temp3 = r6;
47
48 if (masm.use_eabi_hardfloat()) {
49 // Input value is in d0 anyway, nothing to do.
50 } else {
51 __ vmov(input, r0, r1);
52 }
53 __ Push(temp3, temp2, temp1);
54 MathExpGenerator::EmitMathExp(
55 &masm, input, result, double_scratch1, double_scratch2,
56 temp1, temp2, temp3);
57 __ Pop(temp3, temp2, temp1);
58 if (masm.use_eabi_hardfloat()) {
59 __ vmov(d0, result);
60 } else {
61 __ vmov(r0, r1, result);
62 }
63 __ Ret();
Ben Murdoch3ef787d2012-04-12 10:51:47 +010064 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +000065
66 CodeDesc desc;
67 masm.GetCode(&desc);
68 DCHECK(!RelocInfo::RequiresRelocation(desc));
69
70 CpuFeatures::FlushICache(buffer, actual_size);
71 base::OS::ProtectCode(buffer, actual_size);
72
73#if !defined(USE_SIMULATOR)
74 return FUNCTION_CAST<UnaryMathFunction>(buffer);
75#else
76 fast_exp_arm_machine_code = buffer;
77 return &fast_exp_simulator;
78#endif
Ben Murdoch3ef787d2012-04-12 10:51:47 +010079}
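
// Illustrative use (hypothetical caller, not part of this file):
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.71828; the returned pointer is plain
//                              // std::exp when --fast-math is off or the
//                              // code buffer could not be allocated.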

#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));
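    // Prefetch ahead of the copy loops. The unconditional pld instructions
    // below touch every 64th byte; on cores with 32-byte cache lines the
    // extra plds cover the intermediate lines as well.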

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));
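    // The loop biases chars by -256 so the SetCC subtraction of 64 keeps
    // iterating while at least 64 biased bytes remain; the add above undoes
    // the bias. E.g. chars == 300: biased to 44, one 64-byte iteration
    // leaves -20, and un-biasing yields 236 for the tail blocks below.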

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
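    // E.g. chars == 3: rsb yields 5, src/dest are backed up by 5, and the
    // 8-byte vector copy rewrites the last 5 bytes already copied plus the
    // 3 remaining ones.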

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
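  // Shifting chars left by 31 puts bit 0 in the sign position (Z clear,
  // i.e. ne, when a trailing byte remains) and moves bit 1 into the carry
  // (cs when a trailing halfword remains). E.g. chars == 3 copies one
  // halfword, then one byte.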
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, temp1);
    __ uxtb16(temp4, temp1, 8);
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
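    // Worked example for the widening above, temp1 = 0xDDCCBBAA
    // (bytes AA BB CC DD in memory):
    //   uxtb16 temp3          -> 0x00CC00AA  (bytes 0 and 2)
    //   uxtb16 temp4, ror #8  -> 0x00DD00BB  (bytes 1 and 3)
    //   pkhbt                 -> 0x00BB00AA  (halfwords 0x00AA, 0x00BB)
    //   pkhtb                 -> 0x00DD00CC  (halfwords 0x00CC, 0x00DD)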
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, temp1, 8);
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, temp1);
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // elements (r4): source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;
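  // kHoleNanUpper32/kHoleNanLower32 are the two halves of the NaN bit
  // pattern the VM reserves as the "hole" sentinel in FixedDoubleArrays.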

  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ b(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &initialization_loop);

  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Use offset addresses in src_elements to take full advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


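// Loads the character at |index| from |string| into |result|, unwrapping
// sliced and flat cons strings and then dispatching on sequential vs.
// external representation and one- vs. two-byte encoding. Anything that
// still needs flattening is handed off to |call_runtime|.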
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &one_byte);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}
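

// A sketch of the method used below, as read from the code: the input is
// range-reduced so that e^x factors into a power of two (assembled directly
// in the exponent bits), a 2^(i/2^11) scale fetched from math_exp_log_table,
// and a short polynomial correction for the remainder. The exact meaning of
// each ExpConstant() slot is defined by ExternalReference::math_exp_constants
// and is not reproduced here.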
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  // Load 1 into double_scratch2, as math_exp_constants_array[8] == 1.
  DCHECK(*reinterpret_cast<double*>
             (ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));
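  // temp2 now holds an 11-bit table index and temp1 the remaining high bits
  // plus the IEEE-754 double exponent bias (0x3ff); the orr below shifts
  // temp1 left by 20 to place it in the exponent field of the high word.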

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
  // The first word loaded goes into the lower-numbered register.
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ vmov(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the ARM simulator ICache is set up.
  SmartPointer<CodePatcher> patcher(
      new CodePatcher(young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushFixedFrame(r1);
  patcher->masm()->nop(ip.code());
  patcher->masm()->add(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}
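
// The aged sequence emitted above is:
//   add r0, pc, #-8       ; r0 = start of the sequence
//   ldr pc, [pc, #-4]     ; jump to the code age stub
//   <stub entry address>  ; literal emitted by emit_code_stub_address
// so the stub can locate the patched code via r0.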


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM