// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ppc/codegen-ppc.h"

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"

namespace v8 {
namespace internal {


#define __ masm.

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  // Called from C
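  // function_descriptor() emits an ABI function descriptor when the target
  // ABI uses them (e.g. AIX or PPC64 ELFv1), so the generated buffer can be
  // entered directly from C code.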
  __ function_descriptor();

  __ MovFromFloatParameter(d1);
  __ fsqrt(d1, d1);
  __ MovToFloatResult(d1);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
         !RelocInfo::RequiresRelocation(desc));

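  // Flush the instruction cache so the freshly generated code is visible to
  // instruction fetch before it is executed.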
  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
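  // Record the map write so the GC write barrier (remembered set /
  // incremental marking) sees the new map pointer.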
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r10;
  Register scratch3 = r11;
  Register scratch4 = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(scratch3, length);
  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
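  // DOUBLE_ALIGNMENT requests an 8-byte aligned allocation so the unboxed
  // doubles in the new backing store can be stored directly.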
  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
  __ subi(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  // elements: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array_end, length);
  __ add(array_end, scratch2, array_end);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(scratch3, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // scratch3: current element
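  // The array is in FAST_SMI_ELEMENTS mode, so the only possible non-smi
  // element is the hole; a failed untag therefore means the hole (the debug
  // check under convert_hole verifies this).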
  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(scratch3, d0);
  __ stfd(d0, MemOperand(scratch2, 0));
  __ addi(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(scratch2, 0));
#else
  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
  __ addi(scratch2, scratch2, Operand(8));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ blt(&loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r10;
  Register scratch3 = r11;
  Register hole_value = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, FieldMemOperand(array,
            FixedDoubleArray::kLengthOffset), r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
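  // StorePU stores with update: it pre-increments dst_elements by
  // kPointerSize before the store, hence the loop starts one slot before the
  // first element.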
  __ bind(&initialization_loop);
  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
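  // Only the upper 32 bits are inspected; kHoleNanUpper32 identifies the
  // dedicated hole-NaN bit pattern used to mark holes in double arrays.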
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for std
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


// assume ip can be used as a scratch register below
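// Loads the character at |index| from |string| into |result|.  Cases that
// cannot be handled inline (non-flat cons strings, short external strings)
// bail out to |call_runtime|.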
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ andi(r0, result, Operand(kIsIndirectStringMask));
  __ beq(&check_sequential, cr0);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ and_(r0, result, ip, SetRC);
  __ beq(&cons_string, cr0);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ add(index, index, ip);
  __ b(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ andi(r0, result, Operand(kStringRepresentationMask));
  __ bne(&external_string, cr0);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ addi(string, string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ andi(r0, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ andi(r0, result, Operand(kShortExternalStringMask));
  __ bne(call_runtime, cr0);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ andi(r0, result, Operand(kStringEncodingMask));
  __ bne(&one_byte, cr0);
  // Two-byte string.
  __ ShiftLeftImm(result, index, Operand(1));
  __ lhzx(result, MemOperand(string, result));
  __ b(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ lbzx(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before ARM simulator ICache is setup.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
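  // The young code age sequence is the standard frame-setup prologue padded
  // with nops, giving it a fixed, recognizable length.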
  patcher->masm()->PushStandardFrame(r4);
  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
    patcher->masm()->nop();
  }
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = NULL;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
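    // Overwrite the young prologue with a marker nop followed by an absolute
    // jump into the code age stub (see GenerateMakeCodeYoungAgainCommon).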
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // Don't use Call -- we need to preserve ip and lr.
    // GenerateMakeCodeYoungAgainCommon for the stub code.
    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
    patcher.masm()->mov(r3, Operand(target));
    patcher.masm()->Jump(r3);
    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
      patcher.masm()->nop();
    }
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC