// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/s390/codegen-s390.h"

#if V8_TARGET_ARCH_S390

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"

namespace v8 {
namespace internal {

#define __ masm.

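// When running on the simulator (i.e. on a non-s390 host), generated machine
// code cannot be called directly through a C++ function pointer, so this
// trampoline routes the call through the simulator's floating-point call
// interface.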
#if defined(USE_SIMULATOR)
byte* fast_exp_s390_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
  return Simulator::current(isolate)->CallFPReturnsDouble(
      fast_exp_s390_machine_code, x, 0);
}
#endif

UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    DoubleRegister input = d0;
    DoubleRegister result = d2;
    DoubleRegister double_scratch1 = d3;
    DoubleRegister double_scratch2 = d4;
    Register temp1 = r6;
    Register temp2 = r7;
    Register temp3 = r8;

    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
                                  double_scratch2, temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    __ ldr(d0, result);
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
  fast_exp_s390_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
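
// A minimal usage sketch (hypothetical caller; the real call sites live in
// the platform-independent runtime):
//
//   UnaryMathFunctionWithIsolate fast_exp = CreateExpFunction(isolate);
//   double y = (fast_exp != nullptr) ? fast_exp(1.0, isolate) : std::exp(1.0);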

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(d0);
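  // SQDBR is the z/Architecture "square root (long BFP)" instruction; d0 is
  // both the argument and the result register here.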
  __ sqdbr(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

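// These two hooks bracket a stub's call into the runtime: BeforeCall sets up
// an internal frame so the stack can be walked during GC, and AfterCall tears
// it down again.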
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r6;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
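
// GenerateSmiToDouble copies a FixedArray of smis into a freshly allocated
// FixedDoubleArray, converting each smi to a double and mapping the hole to
// the canonical hole NaN bit pattern.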
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r6;
  Register length = r7;
  Register array = r8;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r1;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map, Label::kNear);

  // Preserve lr and use r14 as a temporary register.
  __ push(r14);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(r14, length);
  __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ SubP(array, array, Operand(kHeapObjectTag));
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ AddP(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ AddP(target_map, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array, length);
  __ AddP(array_end, r9, array);
  // Repurpose registers no longer in use.
#if V8_TARGET_ARCH_S390X
  Register hole_int64 = elements;
#else
  Register hole_lower = elements;
  Register hole_upper = length;
#endif
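  // The hole in a FixedDoubleArray is encoded as a NaN with a fixed bit
  // pattern (kHoleNanUpper32/kHoleNanLower32), distinguishable from any NaN
  // an ordinary computation could produce.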
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // r9: begin of destination FixedDoubleArray element fields, not tagged

  __ b(&entry, Label::kNear);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done, Label::kNear);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(r14);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(r14, MemOperand(scratch1));
  __ la(scratch1, MemOperand(scratch1, kPointerSize));
  // r14: current element
  __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(r14, d0);
  __ StoreDouble(d0, MemOperand(r9, 0));
  __ la(r9, MemOperand(r9, 8));

  __ b(&entry, Label::kNear);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ LoadP(r1, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_S390X
  __ stg(hole_int64, MemOperand(r9, 0));
#else
  __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
  __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
#endif
  __ AddP(r9, Operand(8));

  __ bind(&entry);
  __ CmpP(r9, array_end);
  __ blt(&loop);

  __ pop(r14);
  __ bind(&done);
}

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r6;
  Register array = r8;
  Register length = r7;
  Register scratch = r1;
  Register scratch3 = r9;
  Register hole_value = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ AddP(array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
            r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ AddP(src_elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
  __ beq(&loop_done, Label::kNear);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ AddP(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
  __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
  __ BranchOnCount(scratch, &initialization_loop);
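  // BranchOnCount decrements scratch and branches while it is still non-zero
  // (BRCT/BRCTG), so the loop above stores one hole per element slot.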

  __ AddP(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Use offset addresses in src_elements to take full advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop, Label::kNear);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ AddP(dst_elements, Operand(kPointerSize));
  __ CmpLogicalP(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ AddP(src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
  __ beq(&convert_hole, Label::kNear);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_S390X
  __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for std
  __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ LoadlW(scratch2,
            MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ LoadlW(upper_bits,
            MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ StoreW(scratch2,
            FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ StoreW(upper_bits,
            FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ LoadRR(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ AddP(dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ CmpLogicalP(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

// Assume ip can be used as a scratch register below.
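// Loads the code unit at |index| of |string| into |result|. Indirect strings
// (slices and flat cons strings) are first resolved to their underlying
// sequential or external string; cases that need runtime support (non-flat
// cons strings, short external strings) jump to |call_runtime|.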
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ mov(r0, Operand(kIsIndirectStringMask));
  __ AndP(r0, result);
  __ beq(&check_sequential, Label::kNear /*, cr0*/);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ LoadRR(r0, result);
  __ AndP(r0, ip /*, SetRC*/);  // Should be okay to remove RC
  __ beq(&cons_string, Label::kNear /*, cr0*/);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ AddP(index, ip);
  __ b(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ mov(r0, Operand(kStringRepresentationMask));
  __ AndP(r0, result);
  __ bne(&external_string, Label::kNear);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding, Label::kNear);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ mov(r0, Operand(kIsIndirectStringMask));
    __ AndP(r0, result);
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ mov(r0, Operand(kShortExternalStringMask));
  __ AndP(r0, result);
  __ bne(call_runtime /*, cr0*/);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ mov(r0, Operand(kStringEncodingMask));
  __ AndP(r0, result);
  __ bne(&one_byte, Label::kNear);
  // Two-byte string.
  __ ShiftLeftP(result, index, Operand(1));
  __ LoadLogicalHalfWordP(result, MemOperand(string, result));
  __ b(&done, Label::kNear);
  __ bind(&one_byte);
  // One-byte string.
  __ LoadlB(result, MemOperand(string, index));
  __ bind(&done);
}

static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}
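
// The exp constants form a contiguous table of doubles laid out by
// ExternalReference::InitializeMathExpData() in the shared codegen: entries 0
// and 1 bound the argument range below/above which exp(x) flushes to 0 or
// +infinity, entry 2 is +infinity itself, and the remaining entries are
// coefficients of the table-driven approximation used by EmitMathExp below.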
void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1, Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ LoadDouble(double_scratch1, ExpConstant(0, temp3));
  __ cdbr(double_scratch1, input);
  __ ldr(result, input);
  __ bunordered(&done, Label::kNear);
  __ bge(&zero, Label::kNear);

  __ LoadDouble(double_scratch2, ExpConstant(1, temp3));
  __ cdbr(input, double_scratch2);
  __ bge(&infinity, Label::kNear);

  __ LoadDouble(double_scratch1, ExpConstant(3, temp3));
  __ LoadDouble(result, ExpConstant(4, temp3));

  // Do not generate madbr, as intermediate results are not
  // rounded properly.
  __ mdbr(double_scratch1, input);
  __ adbr(double_scratch1, result);

  // Move low word of double_scratch1 to temp2.
  __ lgdr(temp2, double_scratch1);
  __ nihf(temp2, Operand::Zero());

  __ sdbr(double_scratch1, result);
  __ LoadDouble(result, ExpConstant(6, temp3));
  __ LoadDouble(double_scratch2, ExpConstant(5, temp3));
  __ mdbr(double_scratch1, double_scratch2);
  __ sdbr(double_scratch1, input);
  __ sdbr(result, double_scratch1);
  __ ldr(double_scratch2, double_scratch1);
  __ mdbr(double_scratch2, double_scratch2);
  __ mdbr(result, double_scratch2);
  __ LoadDouble(double_scratch2, ExpConstant(7, temp3));
  __ mdbr(result, double_scratch2);
  __ sdbr(result, double_scratch1);
  __ LoadDouble(double_scratch2, ExpConstant(8, temp3));
  __ adbr(result, double_scratch2);
  __ ShiftRight(temp1, temp2, Operand(11));
  __ AndP(temp2, Operand(0x7ff));
  __ AddP(temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ ShiftLeft(temp2, temp2, Operand(3));

  __ lg(temp2, MemOperand(temp2, temp3));
  __ sllg(temp1, temp1, Operand(52));
  __ ogr(temp2, temp1);
  __ ldgr(double_scratch1, temp2);
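  // temp1 held the biased binary exponent and temp2 the log-table entry for
  // the fractional part; sllg/ogr assembled them into the bit pattern of an
  // IEEE-754 double equal to 2^k * table_value, which ldgr just moved into
  // double_scratch1 for the final multiply below.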

  __ mdbr(result, double_scratch1);
  __ b(&done, Label::kNear);

  __ bind(&zero);
  __ lzdr(kDoubleRegZero);
  __ ldr(result, kDoubleRegZero);
  __ b(&done, Label::kNear);

  __ bind(&infinity);
  __ LoadDouble(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator ICache is set up.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length(), CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(r3);
}
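
// The "young" code age sequence is simply the standard frame push emitted
// above; aging overwrites it with a marker nop followed by a call to an
// age-specific stub (see PatchPlatformCodeAge), and IsOld detects that
// marker nop.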

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = NULL;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // We need to push lr on the stack so that GenerateMakeCodeYoungAgainCommon
    // knows where to pick up the return address.
    //
    // Since we can no longer guarantee that ip will hold the branch address
    // because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
    // can calculate the branch address offset.
    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
    patcher.masm()->CleanseP(r14);
    patcher.masm()->Push(r14);
    patcher.masm()->mov(r2, Operand(target));
    patcher.masm()->Call(r2);
    for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
         i += 2) {
      // TODO(joransiu): Create nop function to pad
      // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
      patcher.masm()->nop();  // 2-byte nops().
    }
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390