1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <assert.h> // For assert
6#include <limits.h> // For LONG_MIN, LONG_MAX.
7
8#if V8_TARGET_ARCH_S390
9
10#include "src/base/bits.h"
11#include "src/base/division-by-constant.h"
12#include "src/bootstrapper.h"
13#include "src/codegen.h"
14#include "src/debug/debug.h"
15#include "src/register-configuration.h"
16#include "src/runtime/runtime.h"
17
18#include "src/s390/macro-assembler-s390.h"
19
20namespace v8 {
21namespace internal {
22
23MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
24 CodeObjectRequired create_code_object)
25 : Assembler(arg_isolate, buffer, size),
26 generating_stub_(false),
27 has_frame_(false) {
28 if (create_code_object == CodeObjectRequired::kYes) {
29 code_object_ =
30 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
31 }
32}
33
34void MacroAssembler::Jump(Register target) { b(target); }
35
36void MacroAssembler::JumpToJSEntry(Register target) {
37 Move(ip, target);
38 Jump(ip);
39}
40
41void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
42 Condition cond, CRegister) {
43 Label skip;
44
45 if (cond != al) b(NegateCondition(cond), &skip);
46
47 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
48
49 mov(ip, Operand(target, rmode));
50 b(ip);
51
52 bind(&skip);
53}
54
55void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
56 CRegister cr) {
57 DCHECK(!RelocInfo::IsCodeTarget(rmode));
58 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
59}
60
61void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
62 Condition cond) {
63 DCHECK(RelocInfo::IsCodeTarget(rmode));
64 jump(code, rmode, cond);
65}
66
67int MacroAssembler::CallSize(Register target) { return 2; } // BASR
68
69void MacroAssembler::Call(Register target) {
70 Label start;
71 bind(&start);
72
73 // Statement positions are expected to be recorded when the target
74 // address is loaded.
75 positions_recorder()->WriteRecordedPositions();
76
77 // Branch to target via indirect branch
78 basr(r14, target);
79
80 DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
81}
82
83void MacroAssembler::CallJSEntry(Register target) {
84 DCHECK(target.is(ip));
85 Call(target);
86}
87
88int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
89 Condition cond) {
90 // S390 Assembler::move sequence is IILF / IIHF
91 int size;
92#if V8_TARGET_ARCH_S390X
93 size = 14; // IILF + IIHF + BASR
94#else
95 size = 8; // IILF + BASR
96#endif
97 return size;
98}
99
100int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
101 RelocInfo::Mode rmode,
102 Condition cond) {
103 // S390 Assembler::move sequence is IILF / IIHF
104 int size;
105#if V8_TARGET_ARCH_S390X
106 size = 14; // IILF + IIHF + BASR
107#else
108 size = 8; // IILF + BASR
109#endif
110 return size;
111}
112
113void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
114 Condition cond) {
115 DCHECK(cond == al);
116
117#ifdef DEBUG
118 // Check the expected size before generating code to ensure we make the same
119 // assumptions about constant pool availability (e.g., whether it is full or not).
120 int expected_size = CallSize(target, rmode, cond);
121 Label start;
122 bind(&start);
123#endif
124
125 // Statement positions are expected to be recorded when the target
126 // address is loaded.
127 positions_recorder()->WriteRecordedPositions();
128
129 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
130 basr(r14, ip);
131
132 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
133}
134
135int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
136 TypeFeedbackId ast_id, Condition cond) {
137 return 6; // BRASL
138}
139
140void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
141 TypeFeedbackId ast_id, Condition cond) {
142 DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
143
144#ifdef DEBUG
145 // Check the expected size before generating code to ensure we make the same
146 // assumptions about constant pool availability (e.g., whether it is full or not).
147 int expected_size = CallSize(code, rmode, ast_id, cond);
148 Label start;
149 bind(&start);
150#endif
151 call(code, rmode, ast_id);
152 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
153}
154
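// Deallocate |count| stack slots by bumping sp: LA is used when the byte
// count fits an unsigned 12-bit displacement, LAY when it fits a signed
// 20-bit displacement, and a plain add otherwise.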
155void MacroAssembler::Drop(int count) {
156 if (count > 0) {
157 int total = count * kPointerSize;
158 if (is_uint12(total)) {
159 la(sp, MemOperand(sp, total));
160 } else if (is_int20(total)) {
161 lay(sp, MemOperand(sp, total));
162 } else {
163 AddP(sp, Operand(total));
164 }
165 }
166}
167
168void MacroAssembler::Drop(Register count, Register scratch) {
169 ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
170 AddP(sp, sp, scratch);
171}
172
173void MacroAssembler::Call(Label* target) { b(r14, target); }
174
175void MacroAssembler::Push(Handle<Object> handle) {
176 mov(r0, Operand(handle));
177 push(r0);
178}
179
180void MacroAssembler::Move(Register dst, Handle<Object> value) {
181 AllowDeferredHandleDereference smi_check;
182 if (value->IsSmi()) {
183 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
184 } else {
185 DCHECK(value->IsHeapObject());
186 if (isolate()->heap()->InNewSpace(*value)) {
187 Handle<Cell> cell = isolate()->factory()->NewCell(value);
188 mov(dst, Operand(cell));
189 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
190 } else {
191 mov(dst, Operand(value));
192 }
193 }
194}
195
196void MacroAssembler::Move(Register dst, Register src, Condition cond) {
197 if (!dst.is(src)) {
198 LoadRR(dst, src);
199 }
200}
201
202void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
203 if (!dst.is(src)) {
204 ldr(dst, src);
205 }
206}
207
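// InsertDoubleLow/InsertDoubleHigh replace one 32-bit half of |dst| with
// |src|: the double is spilled to the slot just below sp, the selected word
// is overwritten, and the result is reloaded.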
208void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src) {
209 StoreDouble(dst, MemOperand(sp, -kDoubleSize));
210#if V8_TARGET_LITTLE_ENDIAN
211 StoreW(src, MemOperand(sp, -kDoubleSize));
212#else
213 StoreW(src, MemOperand(sp, -kDoubleSize / 2));
214#endif
215 ldy(dst, MemOperand(sp, -kDoubleSize));
216}
217
218void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src) {
219 StoreDouble(dst, MemOperand(sp, -kDoubleSize));
220#if V8_TARGET_LITTLE_ENDIAN
221 StoreW(src, MemOperand(sp, -kDoubleSize / 2));
222#else
223 StoreW(src, MemOperand(sp, -kDoubleSize));
224#endif
225 ldy(dst, MemOperand(sp, -kDoubleSize));
226}
227
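// Registers are stored in descending encoding order, so the lowest-numbered
// register in |regs| ends up closest to the stack pointer; MultiPop below
// walks the set in the opposite order.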
228void MacroAssembler::MultiPush(RegList regs, Register location) {
229 int16_t num_to_push = NumberOfBitsSet(regs);
230 int16_t stack_offset = num_to_push * kPointerSize;
231
232 SubP(location, location, Operand(stack_offset));
233 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
234 if ((regs & (1 << i)) != 0) {
235 stack_offset -= kPointerSize;
236 StoreP(ToRegister(i), MemOperand(location, stack_offset));
237 }
238 }
239}
240
241void MacroAssembler::MultiPop(RegList regs, Register location) {
242 int16_t stack_offset = 0;
243
244 for (int16_t i = 0; i < Register::kNumRegisters; i++) {
245 if ((regs & (1 << i)) != 0) {
246 LoadP(ToRegister(i), MemOperand(location, stack_offset));
247 stack_offset += kPointerSize;
248 }
249 }
250 AddP(location, location, Operand(stack_offset));
251}
252
253void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
254 int16_t num_to_push = NumberOfBitsSet(dregs);
255 int16_t stack_offset = num_to_push * kDoubleSize;
256
257 SubP(location, location, Operand(stack_offset));
258 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
259 if ((dregs & (1 << i)) != 0) {
260 DoubleRegister dreg = DoubleRegister::from_code(i);
261 stack_offset -= kDoubleSize;
262 StoreDouble(dreg, MemOperand(location, stack_offset));
263 }
264 }
265}
266
267void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
268 int16_t stack_offset = 0;
269
270 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
271 if ((dregs & (1 << i)) != 0) {
272 DoubleRegister dreg = DoubleRegister::from_code(i);
273 LoadDouble(dreg, MemOperand(location, stack_offset));
274 stack_offset += kDoubleSize;
275 }
276 }
277 AddP(location, location, Operand(stack_offset));
278}
279
280void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
281 Condition) {
282 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
283}
284
285void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
286 Condition) {
287 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
288 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
289}
290
291void MacroAssembler::InNewSpace(Register object, Register scratch,
292 Condition cond, Label* branch) {
293 DCHECK(cond == eq || cond == ne);
294 // TODO(joransiu): check if we can merge mov Operand into AndP.
295 const int mask =
296 (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
297 CheckPageFlag(object, scratch, mask, cond, branch);
298}
299
300void MacroAssembler::RecordWriteField(
301 Register object, int offset, Register value, Register dst,
302 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
303 RememberedSetAction remembered_set_action, SmiCheck smi_check,
304 PointersToHereCheck pointers_to_here_check_for_value) {
305 // First, check if a write barrier is even needed. The tests below
306 // catch stores of Smis.
307 Label done;
308
309 // Skip barrier if writing a smi.
310 if (smi_check == INLINE_SMI_CHECK) {
311 JumpIfSmi(value, &done);
312 }
313
314 // Although the object register is tagged, the offset is relative to the start
315 // of the object, so the offset must be a multiple of kPointerSize.
316 DCHECK(IsAligned(offset, kPointerSize));
317
318 lay(dst, MemOperand(object, offset - kHeapObjectTag));
319 if (emit_debug_code()) {
320 Label ok;
321 AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
322 beq(&ok, Label::kNear);
323 stop("Unaligned cell in write barrier");
324 bind(&ok);
325 }
326
327 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
328 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
329
330 bind(&done);
331
332 // Clobber clobbered input registers when running with the debug-code flag
333 // turned on to provoke errors.
334 if (emit_debug_code()) {
335 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
336 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
337 }
338}
339
340// Will clobber 4 registers: object, map, dst, ip. The
341// register 'object' contains a heap object pointer.
342void MacroAssembler::RecordWriteForMap(Register object, Register map,
343 Register dst,
344 LinkRegisterStatus lr_status,
345 SaveFPRegsMode fp_mode) {
346 if (emit_debug_code()) {
347 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
348 CmpP(dst, Operand(isolate()->factory()->meta_map()));
349 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
350 }
351
352 if (!FLAG_incremental_marking) {
353 return;
354 }
355
356 if (emit_debug_code()) {
357 CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
358 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
359 }
360
361 Label done;
362
363 // A single check of the map's page's interesting flag suffices, since it is
364 // only set during incremental collection, in which case the from-object's
365 // page's interesting flag is guaranteed to be set as well.  This optimization
366 // relies on the fact that maps can never be in new space.
367 CheckPageFlag(map,
368 map, // Used as scratch.
369 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
370
371 lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
372 if (emit_debug_code()) {
373 Label ok;
374 AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
375 beq(&ok, Label::kNear);
376 stop("Unaligned cell in write barrier");
377 bind(&ok);
378 }
379
380 // Record the actual write.
381 if (lr_status == kLRHasNotBeenSaved) {
382 push(r14);
383 }
384 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
385 fp_mode);
386 CallStub(&stub);
387 if (lr_status == kLRHasNotBeenSaved) {
388 pop(r14);
389 }
390
391 bind(&done);
392
393 // Count number of write barriers in generated code.
394 isolate()->counters()->write_barriers_static()->Increment();
395 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
396
397 // Clobber clobbered registers when running with the debug-code flag
398 // turned on to provoke errors.
399 if (emit_debug_code()) {
400 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
401 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
402 }
403}
404
405// Will clobber 4 registers: object, address, value, ip. The
406// register 'object' contains a heap object pointer. The heap object
407// tag is shifted away.
408void MacroAssembler::RecordWrite(
409 Register object, Register address, Register value,
410 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
411 RememberedSetAction remembered_set_action, SmiCheck smi_check,
412 PointersToHereCheck pointers_to_here_check_for_value) {
413 DCHECK(!object.is(value));
414 if (emit_debug_code()) {
415 CmpP(value, MemOperand(address));
416 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
417 }
418
419 if (remembered_set_action == OMIT_REMEMBERED_SET &&
420 !FLAG_incremental_marking) {
421 return;
422 }
423 // First, check if a write barrier is even needed. The tests below
424 // catch stores of smis and stores into the young generation.
425 Label done;
426
427 if (smi_check == INLINE_SMI_CHECK) {
428 JumpIfSmi(value, &done);
429 }
430
431 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
432 CheckPageFlag(value,
433 value, // Used as scratch.
434 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
435 }
436 CheckPageFlag(object,
437 value, // Used as scratch.
438 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
439
440 // Record the actual write.
441 if (lr_status == kLRHasNotBeenSaved) {
442 push(r14);
443 }
444 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
445 fp_mode);
446 CallStub(&stub);
447 if (lr_status == kLRHasNotBeenSaved) {
448 pop(r14);
449 }
450
451 bind(&done);
452
453 // Count number of write barriers in generated code.
454 isolate()->counters()->write_barriers_static()->Increment();
455 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
456 value);
457
458 // Clobber clobbered registers when running with the debug-code flag
459 // turned on to provoke errors.
460 if (emit_debug_code()) {
461 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
462 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
463 }
464}
465
466void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
467 Register code_entry,
468 Register scratch) {
469 const int offset = JSFunction::kCodeEntryOffset;
470
471 // Since a code entry (value) is always in old space, we don't need to update
472 // the remembered set. If incremental marking is off, there is nothing for us
473 // to do.
474 if (!FLAG_incremental_marking) return;
475
476 DCHECK(js_function.is(r3));
477 DCHECK(code_entry.is(r6));
478 DCHECK(scratch.is(r7));
479 AssertNotSmi(js_function);
480
481 if (emit_debug_code()) {
482 AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
483 LoadP(ip, MemOperand(scratch));
484 CmpP(ip, code_entry);
485 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
486 }
487
488 // First, check if a write barrier is even needed. The tests below
489 // catch stores of Smis and stores into young gen.
490 Label done;
491
492 CheckPageFlag(code_entry, scratch,
493 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
494 CheckPageFlag(js_function, scratch,
495 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
496
497 const Register dst = scratch;
498 AddP(dst, js_function, Operand(offset - kHeapObjectTag));
499
500 // Save caller-saved registers. js_function and code_entry are in the
501 // caller-saved register list.
502 DCHECK(kJSCallerSaved & js_function.bit());
503 DCHECK(kJSCallerSaved & code_entry.bit());
504 MultiPush(kJSCallerSaved | r14.bit());
505
506 int argument_count = 3;
507 PrepareCallCFunction(argument_count, code_entry);
508
509 LoadRR(r2, js_function);
510 LoadRR(r3, dst);
511 mov(r4, Operand(ExternalReference::isolate_address(isolate())));
512
513 {
514 AllowExternalCallThatCantCauseGC scope(this);
515 CallCFunction(
516 ExternalReference::incremental_marking_record_write_code_entry_function(
517 isolate()),
518 argument_count);
519 }
520
521 // Restore caller-saved registers (including js_function and code_entry).
522 MultiPop(kJSCallerSaved | r14.bit());
523
524 bind(&done);
525}
526
527void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
528 Register address, Register scratch,
529 SaveFPRegsMode fp_mode,
530 RememberedSetFinalAction and_then) {
531 Label done;
532 if (emit_debug_code()) {
533 Label ok;
534 JumpIfNotInNewSpace(object, scratch, &ok);
535 stop("Remembered set pointer is in new space");
536 bind(&ok);
537 }
538 // Load store buffer top.
539 ExternalReference store_buffer =
540 ExternalReference::store_buffer_top(isolate());
541 mov(ip, Operand(store_buffer));
542 LoadP(scratch, MemOperand(ip));
543 // Store pointer to buffer and increment buffer top.
544 StoreP(address, MemOperand(scratch));
545 AddP(scratch, Operand(kPointerSize));
546 // Write back new top of buffer.
547 StoreP(scratch, MemOperand(ip));
548 // Check whether we have reached the end of the buffer; if so, the
549 // StoreBufferOverflowStub is called below.
550 AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
551
552 if (and_then == kFallThroughAtEnd) {
553 bne(&done, Label::kNear);
554 } else {
555 DCHECK(and_then == kReturnAtEnd);
556 bne(&done, Label::kNear);
557 }
558 push(r14);
559 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
560 CallStub(&store_buffer_overflow);
561 pop(r14);
562 bind(&done);
563 if (and_then == kReturnAtEnd) {
564 Ret();
565 }
566}
567
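// Build the fixed part of a frame: push the return address, the caller's fp
// and, if supplied, a marker, then point fp at the slot holding the saved fp.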
568void MacroAssembler::PushCommonFrame(Register marker_reg) {
569 int fp_delta = 0;
570 CleanseP(r14);
571 if (marker_reg.is_valid()) {
572 Push(r14, fp, marker_reg);
573 fp_delta = 1;
574 } else {
575 Push(r14, fp);
576 fp_delta = 0;
577 }
578 la(fp, MemOperand(sp, fp_delta * kPointerSize));
579}
580
581void MacroAssembler::PopCommonFrame(Register marker_reg) {
582 if (marker_reg.is_valid()) {
583 Pop(r14, fp, marker_reg);
584 } else {
585 Pop(r14, fp);
586 }
587}
588
589void MacroAssembler::PushStandardFrame(Register function_reg) {
590 int fp_delta = 0;
591 CleanseP(r14);
592 if (function_reg.is_valid()) {
593 Push(r14, fp, cp, function_reg);
594 fp_delta = 2;
595 } else {
596 Push(r14, fp, cp);
597 fp_delta = 1;
598 }
599 la(fp, MemOperand(sp, fp_delta * kPointerSize));
600}
601
602void MacroAssembler::RestoreFrameStateForTailCall() {
603 // if (FLAG_enable_embedded_constant_pool) {
604 // LoadP(kConstantPoolRegister,
605 // MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
606 // set_constant_pool_available(false);
607 // }
608 DCHECK(!FLAG_enable_embedded_constant_pool);
609 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
610 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
611}
612
613const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
614const int MacroAssembler::kNumSafepointSavedRegisters =
615 Register::kNumAllocatable;
616
617// Push and pop all registers that can hold pointers.
618void MacroAssembler::PushSafepointRegisters() {
619 // Safepoints expect a block of kNumSafepointRegisters values on the
620 // stack, so adjust the stack for unsaved registers.
621 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
622 DCHECK(num_unsaved >= 0);
623 if (num_unsaved > 0) {
624 lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
625 }
626 MultiPush(kSafepointSavedRegisters);
627}
628
629void MacroAssembler::PopSafepointRegisters() {
630 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
631 MultiPop(kSafepointSavedRegisters);
632 if (num_unsaved > 0) {
633 la(sp, MemOperand(sp, num_unsaved * kPointerSize));
634 }
635}
636
637void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
638 StoreP(src, SafepointRegisterSlot(dst));
639}
640
641void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
642 LoadP(dst, SafepointRegisterSlot(src));
643}
644
645int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
646 // The registers are pushed starting with the highest encoding,
647 // which means that the lowest encodings are closest to the stack pointer.
648 RegList regs = kSafepointSavedRegisters;
649 int index = 0;
650
651 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
652
653 for (int16_t i = 0; i < reg_code; i++) {
654 if ((regs & (1 << i)) != 0) {
655 index++;
656 }
657 }
658
659 return index;
660}
661
662MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
663 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
664}
665
666MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
667 // General purpose registers are pushed last on the stack.
668 const RegisterConfiguration* config =
669 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
670 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
671 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
672 return MemOperand(sp, doubles_size + register_offset);
673}
674
675void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
676 const DoubleRegister src) {
677 // Turn potential sNaN into qNaN
678 if (!dst.is(src)) ldr(dst, src);
679 lzdr(kDoubleRegZero);
680 sdbr(dst, kDoubleRegZero);
681}
682
683void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
684 cdfbr(dst, src);
685}
686
687void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
688 DoubleRegister dst) {
689 if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
690 cdlfbr(Condition(5), Condition(0), dst, src);
691 } else {
692 // zero-extend src
693 llgfr(src, src);
694 // convert to double
695 cdgbr(dst, src);
696 }
697}
698
699void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
700 cefbr(dst, src);
701}
702
703void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
704 DoubleRegister dst) {
705 celfbr(Condition(0), Condition(0), dst, src);
706}
707
708#if V8_TARGET_ARCH_S390X
709void MacroAssembler::ConvertInt64ToDouble(Register src,
710 DoubleRegister double_dst) {
711 cdgbr(double_dst, src);
712}
713
714void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
715 DoubleRegister double_dst) {
716 celgbr(Condition(0), Condition(0), double_dst, src);
717}
718
719void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
720 DoubleRegister double_dst) {
721 cdlgbr(Condition(0), Condition(0), double_dst, src);
722}
723
724void MacroAssembler::ConvertInt64ToFloat(Register src,
725 DoubleRegister double_dst) {
726 cegbr(double_dst, src);
727}
728#endif
729
730void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input,
731#if !V8_TARGET_ARCH_S390X
732 const Register dst_hi,
733#endif
734 const Register dst,
735 const DoubleRegister double_dst,
736 FPRoundingMode rounding_mode) {
737 Condition m = Condition(0);
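 // Map the requested rounding mode onto the rounding-method mask of the
 // convert instruction: 5 = round toward zero, 6 = toward +infinity,
 // 7 = toward -infinity.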
738 switch (rounding_mode) {
739 case kRoundToZero:
740 m = Condition(5);
741 break;
742 case kRoundToNearest:
743 UNIMPLEMENTED();
744 break;
745 case kRoundToPlusInf:
746 m = Condition(6);
747 break;
748 case kRoundToMinusInf:
749 m = Condition(7);
750 break;
751 default:
752 UNIMPLEMENTED();
753 break;
754 }
755 cgebr(m, dst, double_input);
756 ldgr(double_dst, dst);
757#if !V8_TARGET_ARCH_S390X
758 srlg(dst_hi, dst, Operand(32));
759#endif
760}
761
762void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
763#if !V8_TARGET_ARCH_S390X
764 const Register dst_hi,
765#endif
766 const Register dst,
767 const DoubleRegister double_dst,
768 FPRoundingMode rounding_mode) {
769 Condition m = Condition(0);
770 switch (rounding_mode) {
771 case kRoundToZero:
772 m = Condition(5);
773 break;
774 case kRoundToNearest:
775 UNIMPLEMENTED();
776 break;
777 case kRoundToPlusInf:
778 m = Condition(6);
779 break;
780 case kRoundToMinusInf:
781 m = Condition(7);
782 break;
783 default:
784 UNIMPLEMENTED();
785 break;
786 }
787 cgdbr(m, dst, double_input);
788 ldgr(double_dst, dst);
789#if !V8_TARGET_ARCH_S390X
790 srlg(dst_hi, dst, Operand(32));
791#endif
792}
793
794void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
795 const Register dst,
796 const DoubleRegister double_dst,
797 FPRoundingMode rounding_mode) {
798 Condition m = Condition(0);
799 switch (rounding_mode) {
800 case kRoundToZero:
801 m = Condition(5);
802 break;
803 case kRoundToNearest:
804 UNIMPLEMENTED();
805 break;
806 case kRoundToPlusInf:
807 m = Condition(6);
808 break;
809 case kRoundToMinusInf:
810 m = Condition(7);
811 break;
812 default:
813 UNIMPLEMENTED();
814 break;
815 }
816 cfebr(m, dst, double_input);
817 ldgr(double_dst, dst);
818}
819
820void MacroAssembler::ConvertFloat32ToUnsignedInt32(
821 const DoubleRegister double_input, const Register dst,
822 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
823 Condition m = Condition(0);
824 switch (rounding_mode) {
825 case kRoundToZero:
826 m = Condition(5);
827 break;
828 case kRoundToNearest:
829 UNIMPLEMENTED();
830 break;
831 case kRoundToPlusInf:
832 m = Condition(6);
833 break;
834 case kRoundToMinusInf:
835 m = Condition(7);
836 break;
837 default:
838 UNIMPLEMENTED();
839 break;
840 }
841 clfebr(m, Condition(0), dst, double_input);
842 ldgr(double_dst, dst);
843}
844
845#if V8_TARGET_ARCH_S390X
846void MacroAssembler::ConvertFloat32ToUnsignedInt64(
847 const DoubleRegister double_input, const Register dst,
848 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
849 Condition m = Condition(0);
850 switch (rounding_mode) {
851 case kRoundToZero:
852 m = Condition(5);
853 break;
854 case kRoundToNearest:
855 UNIMPLEMENTED();
856 break;
857 case kRoundToPlusInf:
858 m = Condition(6);
859 break;
860 case kRoundToMinusInf:
861 m = Condition(7);
862 break;
863 default:
864 UNIMPLEMENTED();
865 break;
866 }
867 clgebr(m, Condition(0), dst, double_input);
868 ldgr(double_dst, dst);
869}
870
871void MacroAssembler::ConvertDoubleToUnsignedInt64(
872 const DoubleRegister double_input, const Register dst,
873 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
874 Condition m = Condition(0);
875 switch (rounding_mode) {
876 case kRoundToZero:
877 m = Condition(5);
878 break;
879 case kRoundToNearest:
880 UNIMPLEMENTED();
881 break;
882 case kRoundToPlusInf:
883 m = Condition(6);
884 break;
885 case kRoundToMinusInf:
886 m = Condition(7);
887 break;
888 default:
889 UNIMPLEMENTED();
890 break;
891 }
892 clgdbr(m, Condition(0), dst, double_input);
893 ldgr(double_dst, dst);
894}
895
896#endif
897
898#if !V8_TARGET_ARCH_S390X
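// 32-bit builds only: the 64-bit pair shifts (SLDL / SRDL / SRDA) operate on
// an even/odd register pair, so the operands are staged in r0/r1 and the
// halves are copied back out afterwards.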
899void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
900 Register src_low, Register src_high,
901 Register scratch, Register shift) {
902 LoadRR(r0, src_high);
903 LoadRR(r1, src_low);
904 sldl(r0, shift, Operand::Zero());
905 LoadRR(dst_high, r0);
906 LoadRR(dst_low, r1);
907}
908
909void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
910 Register src_low, Register src_high,
911 uint32_t shift) {
912 LoadRR(r0, src_high);
913 LoadRR(r1, src_low);
914 sldl(r0, r0, Operand(shift));
915 LoadRR(dst_high, r0);
916 LoadRR(dst_low, r1);
917}
918
919void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
920 Register src_low, Register src_high,
921 Register scratch, Register shift) {
922 LoadRR(r0, src_high);
923 LoadRR(r1, src_low);
924 srdl(r0, shift, Operand::Zero());
925 LoadRR(dst_high, r0);
926 LoadRR(dst_low, r1);
927}
928
929void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
930 Register src_low, Register src_high,
931 uint32_t shift) {
932 LoadRR(r0, src_high);
933 LoadRR(r1, src_low);
934 srdl(r0, r0, Operand(shift));
935 LoadRR(dst_high, r0);
936 LoadRR(dst_low, r1);
937}
938
939void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
940 Register src_low, Register src_high,
941 Register scratch, Register shift) {
942 LoadRR(r0, src_high);
943 LoadRR(r1, src_low);
944 srda(r0, shift, Operand::Zero());
945 LoadRR(dst_high, r0);
946 LoadRR(dst_low, r1);
947}
948
949void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
950 Register src_low, Register src_high,
951 uint32_t shift) {
952 LoadRR(r0, src_high);
953 LoadRR(r1, src_low);
954 srda(r0, r0, Operand(shift));
955 LoadRR(dst_high, r0);
956 LoadRR(dst_low, r1);
957}
958#endif
959
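// Bitwise moves between a 64-bit general purpose register and a floating
// point register (LGDR / LDGR).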
960void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
961 lgdr(dst, src);
962}
963
964void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
965 ldgr(dst, src);
966}
967
968void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
969 int prologue_offset) {
970 {
971 ConstantPoolUnavailableScope constant_pool_unavailable(this);
972 LoadSmiLiteral(r1, Smi::FromInt(type));
973 PushCommonFrame(r1);
974 }
975}
976
977void MacroAssembler::Prologue(bool code_pre_aging, Register base,
978 int prologue_offset) {
979 DCHECK(!base.is(no_reg));
980 {
981 PredictableCodeSizeScope predictible_code_size_scope(
982 this, kNoCodeAgeSequenceLength);
983 // The following instructions must remain together and unmodified
984 // for code aging to work properly.
985 if (code_pre_aging) {
986 // Pre-age the code.
987 // This matches the code found in PatchPlatformCodeAge()
988 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
989 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
990 nop();
991 CleanseP(r14);
992 Push(r14);
993 mov(r2, Operand(target));
994 Call(r2);
995 for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
996 i += 2) {
997 // TODO(joransiu): Create nop function to pad
998 // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
999 nop(); // 2-byte nop.
1000 }
1001 } else {
1002 // This matches the code found in GetNoCodeAgeSequence()
1003 PushStandardFrame(r3);
1004 }
1005 }
1006}
1007
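// Load the type feedback vector of the current frame's function:
// function -> SharedFunctionInfo -> feedback vector.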
1008void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
1009 LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
1010 LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
1011 LoadP(vector,
1012 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
1013}
1014
1015void MacroAssembler::EnterFrame(StackFrame::Type type,
1016 bool load_constant_pool_pointer_reg) {
1017 // We create a stack frame with:
1018 // Return Addr <-- old sp
1019 // Old FP <-- new fp
1020 // CP
1021 // type
1022 // CodeObject <-- new sp
1023
1024 LoadSmiLiteral(ip, Smi::FromInt(type));
1025 PushCommonFrame(ip);
1026
1027 if (type == StackFrame::INTERNAL) {
1028 mov(r0, Operand(CodeObject()));
1029 push(r0);
1030 }
1031}
1032
1033int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
1034 // Drop the execution stack down to the frame pointer and restore
1035 // the caller frame pointer, return address and constant pool pointer.
1036 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1037 lay(r1, MemOperand(
1038 fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
1039 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1040 LoadRR(sp, r1);
1041 int frame_ends = pc_offset();
1042 return frame_ends;
1043}
1044
1045// ExitFrame layout (possibly out of date; needs verification):
1046//
1047// SP -> previousSP
1048// LK reserved
1049// code
1050// sp_on_exit (for debug?)
1051// oldSP->prev SP
1052// LK
1053// <parameters on stack>
1054
1055// Prior to calling EnterExitFrame, a number of parameters have been pushed
1056// on the stack that we need to wrap a real frame around. First we reserve a
1057// slot for the link register and push the previous SP, which is captured in
1058// the fp register (r11).
1059// Then we allocate the new frame:
1060
1061// r14
1062// oldFP <- newFP
1063// SP
1064// Code
1065// Floats
1066// gaps
1067// Args
1068// ABIRes <- newSP
1069void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
1070 // Set up the frame structure on the stack.
1071 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1072 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1073 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1074 DCHECK(stack_space > 0);
1075
1076 // This is an opportunity to build a frame to wrap
1077 // all of the pushes that have happened inside of V8
1078 // since we were called from C code
1079 CleanseP(r14);
1080 LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
1081 PushCommonFrame(r1);
1082 // Reserve room for saved entry sp and code object.
1083 lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
1084
1085 if (emit_debug_code()) {
1086 StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
1087 }
1088 mov(r1, Operand(CodeObject()));
1089 StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1090
1091 // Save the frame pointer and the context in top.
1092 mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1093 StoreP(fp, MemOperand(r1));
1094 mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1095 StoreP(cp, MemOperand(r1));
1096
1097 // Optionally save all volatile double registers.
1098 if (save_doubles) {
1099 MultiPushDoubles(kCallerSavedDoubles);
1100 // Note that d0 will be accessible at
1101 // fp - ExitFrameConstants::kFrameSize -
1102 // kNumCallerSavedDoubles * kDoubleSize,
1103 // since the sp slot and code slot were pushed after the fp.
1104 }
1105
1106 lay(sp, MemOperand(sp, -stack_space * kPointerSize));
1107
1108 // Allocate and align the frame preparing for calling the runtime
1109 // function.
1110 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1111 if (frame_alignment > 0) {
1112 DCHECK(frame_alignment == 8);
1113 ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
1114 }
1115
1116 StoreP(MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize),
1117 Operand::Zero(), r0);
1118 lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
1119 // Set the exit frame sp value to point just before the return address
1120 // location.
1121 lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
1122 StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
1123}
1124
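// Initialize the length, hash field and map of a freshly allocated string.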
1125void MacroAssembler::InitializeNewString(Register string, Register length,
1126 Heap::RootListIndex map_index,
1127 Register scratch1, Register scratch2) {
1128 SmiTag(scratch1, length);
1129 LoadRoot(scratch2, map_index);
1130 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
1131 StoreP(FieldMemOperand(string, String::kHashFieldSlot),
1132 Operand(String::kEmptyHashField), scratch1);
1133 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1134}
1135
1136int MacroAssembler::ActivationFrameAlignment() {
1137#if !defined(USE_SIMULATOR)
1138 // Running on the real platform. Use the alignment as mandated by the local
1139 // environment.
1140 // Note: This will break if we ever start generating snapshots on one S390
1141 // platform for another S390 platform with a different alignment.
1142 return base::OS::ActivationFrameAlignment();
1143#else // Simulated
1144 // If we are using the simulator then we should always align to the expected
1145 // alignment. As the simulator is used to generate snapshots we do not know
1146 // if the target platform will need alignment, so this is controlled from a
1147 // flag.
1148 return FLAG_sim_stack_alignment;
1149#endif
1150}
1151
1152void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1153 bool restore_context,
1154 bool argument_count_is_length) {
1155 // Optionally restore all double registers.
1156 if (save_doubles) {
1157 // Calculate the stack location of the saved doubles and restore them.
1158 const int kNumRegs = kNumCallerSavedDoubles;
1159 lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
1160 kNumRegs * kDoubleSize)));
1161 MultiPopDoubles(kCallerSavedDoubles, r5);
1162 }
1163
1164 // Clear top frame.
1165 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1166 StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
1167
1168 // Restore current context from top and clear it in debug mode.
1169 if (restore_context) {
1170 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1171 LoadP(cp, MemOperand(ip));
1172 }
1173#ifdef DEBUG
1174 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1175 StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
1176#endif
1177
1178 // Tear down the exit frame, pop the arguments, and return.
1179 LeaveFrame(StackFrame::EXIT);
1180
1181 if (argument_count.is_valid()) {
1182 if (!argument_count_is_length) {
1183 ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
1184 }
1185 la(sp, MemOperand(sp, argument_count));
1186 }
1187}
1188
1189void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
1190 Move(dst, d0);
1191}
1192
1193void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1194 Move(dst, d0);
1195}
1196
1197void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1198 Register caller_args_count_reg,
1199 Register scratch0, Register scratch1) {
1200#if DEBUG
1201 if (callee_args_count.is_reg()) {
1202 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1203 scratch1));
1204 } else {
1205 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1206 }
1207#endif
1208
1209 // Calculate the end of the destination area where we will put the arguments
1210 // after we drop the current frame. We add kPointerSize to count the receiver
1211 // argument, which is not included in the formal parameter count.
1212 Register dst_reg = scratch0;
1213 ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
1214 AddP(dst_reg, fp, dst_reg);
1215 AddP(dst_reg, dst_reg,
1216 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1217
1218 Register src_reg = caller_args_count_reg;
1219 // Calculate the end of source area. +kPointerSize is for the receiver.
1220 if (callee_args_count.is_reg()) {
1221 ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
1222 AddP(src_reg, sp, src_reg);
1223 AddP(src_reg, src_reg, Operand(kPointerSize));
1224 } else {
1225 mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
1226 AddP(src_reg, src_reg, sp);
1227 }
1228
1229 if (FLAG_debug_code) {
1230 CmpLogicalP(src_reg, dst_reg);
1231 Check(lt, kStackAccessBelowStackPointer);
1232 }
1233
1234 // Restore caller's frame pointer and return address now as they will be
1235 // overwritten by the copying loop.
1236 RestoreFrameStateForTailCall();
1237
1238 // Now copy callee arguments to the caller frame going backwards to avoid
1239 // callee arguments corruption (source and destination areas could overlap).
1240
1241 // Both src_reg and dst_reg are pointing to the word after the one to copy,
1242 // so they must be pre-decremented in the loop.
1243 Register tmp_reg = scratch1;
1244 Label loop;
1245 if (callee_args_count.is_reg()) {
1246 AddP(tmp_reg, callee_args_count.reg(), Operand(1)); // +1 for receiver
1247 } else {
1248 mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
1249 }
1250 LoadRR(r1, tmp_reg);
1251 bind(&loop);
1252 LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
1253 StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
1254 lay(src_reg, MemOperand(src_reg, -kPointerSize));
1255 lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
1256 BranchOnCount(r1, &loop);
1257
1258 // Leave current frame.
1259 LoadRR(sp, dst_reg);
1260}
1261
1262void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1263 const ParameterCount& actual, Label* done,
1264 bool* definitely_mismatches,
1265 InvokeFlag flag,
1266 const CallWrapper& call_wrapper) {
1267 bool definitely_matches = false;
1268 *definitely_mismatches = false;
1269 Label regular_invoke;
1270
1271 // Check whether the expected and actual arguments count match. If not,
1272 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1273 // r2: actual arguments count
1274 // r3: function (passed through to callee)
1275 // r4: expected arguments count
1276
1277 // The code below is made a lot easier because the calling code already sets
1278 // up actual and expected registers according to the contract if values are
1279 // passed in registers.
1280
1281 // ARM has some sanity checks, as per below; consider adding them for S390:
1282 // DCHECK(actual.is_immediate() || actual.reg().is(r2));
1283 // DCHECK(expected.is_immediate() || expected.reg().is(r4));
1284
1285 if (expected.is_immediate()) {
1286 DCHECK(actual.is_immediate());
1287 mov(r2, Operand(actual.immediate()));
1288 if (expected.immediate() == actual.immediate()) {
1289 definitely_matches = true;
1290 } else {
1291 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1292 if (expected.immediate() == sentinel) {
1293 // Don't worry about adapting arguments for builtins that
1294 // don't want that done. Skip the adaptation code by making it look
1295 // like we have a match between expected and actual number of
1296 // arguments.
1297 definitely_matches = true;
1298 } else {
1299 *definitely_mismatches = true;
1300 mov(r4, Operand(expected.immediate()));
1301 }
1302 }
1303 } else {
1304 if (actual.is_immediate()) {
1305 mov(r2, Operand(actual.immediate()));
1306 CmpPH(expected.reg(), Operand(actual.immediate()));
1307 beq(&regular_invoke);
1308 } else {
1309 CmpP(expected.reg(), actual.reg());
1310 beq(&regular_invoke);
1311 }
1312 }
1313
1314 if (!definitely_matches) {
1315 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1316 if (flag == CALL_FUNCTION) {
1317 call_wrapper.BeforeCall(CallSize(adaptor));
1318 Call(adaptor);
1319 call_wrapper.AfterCall();
1320 if (!*definitely_mismatches) {
1321 b(done);
1322 }
1323 } else {
1324 Jump(adaptor, RelocInfo::CODE_TARGET);
1325 }
1326 bind(&regular_invoke);
1327 }
1328}
1329
1330void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1331 const ParameterCount& expected,
1332 const ParameterCount& actual) {
1333 Label skip_flooding;
1334 ExternalReference step_in_enabled =
1335 ExternalReference::debug_step_in_enabled_address(isolate());
1336 mov(r6, Operand(step_in_enabled));
1337 LoadlB(r6, MemOperand(r6));
1338 CmpP(r6, Operand::Zero());
1339 beq(&skip_flooding);
1340 {
1341 FrameScope frame(this,
1342 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1343 if (expected.is_reg()) {
1344 SmiTag(expected.reg());
1345 Push(expected.reg());
1346 }
1347 if (actual.is_reg()) {
1348 SmiTag(actual.reg());
1349 Push(actual.reg());
1350 }
1351 if (new_target.is_valid()) {
1352 Push(new_target);
1353 }
1354 Push(fun, fun);
1355 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
1356 Pop(fun);
1357 if (new_target.is_valid()) {
1358 Pop(new_target);
1359 }
1360 if (actual.is_reg()) {
1361 Pop(actual.reg());
1362 SmiUntag(actual.reg());
1363 }
1364 if (expected.is_reg()) {
1365 Pop(expected.reg());
1366 SmiUntag(expected.reg());
1367 }
1368 }
1369 bind(&skip_flooding);
1370}
1371
1372void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1373 const ParameterCount& expected,
1374 const ParameterCount& actual,
1375 InvokeFlag flag,
1376 const CallWrapper& call_wrapper) {
1377 // You can't call a function without a valid frame.
1378 DCHECK(flag == JUMP_FUNCTION || has_frame());
1379
1380 DCHECK(function.is(r3));
1381 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
1382
1383 if (call_wrapper.NeedsDebugStepCheck()) {
1384 FloodFunctionIfStepping(function, new_target, expected, actual);
1385 }
1386
1387 // Clear the new.target register if not given.
1388 if (!new_target.is_valid()) {
1389 LoadRoot(r5, Heap::kUndefinedValueRootIndex);
1390 }
1391
1392 Label done;
1393 bool definitely_mismatches = false;
1394 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1395 call_wrapper);
1396 if (!definitely_mismatches) {
1397 // We call indirectly through the code field in the function to
1398 // allow recompilation to take effect without changing any of the
1399 // call sites.
1400 Register code = ip;
1401 LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1402 if (flag == CALL_FUNCTION) {
1403 call_wrapper.BeforeCall(CallSize(code));
1404 CallJSEntry(code);
1405 call_wrapper.AfterCall();
1406 } else {
1407 DCHECK(flag == JUMP_FUNCTION);
1408 JumpToJSEntry(code);
1409 }
1410
1411 // Continue here if InvokePrologue does handle the invocation due to
1412 // mismatched parameter counts.
1413 bind(&done);
1414 }
1415}
1416
1417void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1418 const ParameterCount& actual,
1419 InvokeFlag flag,
1420 const CallWrapper& call_wrapper) {
1421 // You can't call a function without a valid frame.
1422 DCHECK(flag == JUMP_FUNCTION || has_frame());
1423
1424 // Contract with called JS functions requires that function is passed in r3.
1425 DCHECK(fun.is(r3));
1426
1427 Register expected_reg = r4;
1428 Register temp_reg = r6;
1429 LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
1430 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1431 LoadW(expected_reg,
1432 FieldMemOperand(temp_reg,
1433 SharedFunctionInfo::kFormalParameterCountOffset));
1434#if !defined(V8_TARGET_ARCH_S390X)
1435 SmiUntag(expected_reg);
1436#endif
1437
1438 ParameterCount expected(expected_reg);
1439 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1440}
1441
1442void MacroAssembler::InvokeFunction(Register function,
1443 const ParameterCount& expected,
1444 const ParameterCount& actual,
1445 InvokeFlag flag,
1446 const CallWrapper& call_wrapper) {
1447 // You can't call a function without a valid frame.
1448 DCHECK(flag == JUMP_FUNCTION || has_frame());
1449
1450 // Contract with called JS functions requires that function is passed in r3.
1451 DCHECK(function.is(r3));
1452
1453 // Get the function and setup the context.
1454 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1455
1456 InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper);
1457}
1458
1459void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1460 const ParameterCount& expected,
1461 const ParameterCount& actual,
1462 InvokeFlag flag,
1463 const CallWrapper& call_wrapper) {
1464 Move(r3, function);
1465 InvokeFunction(r3, expected, actual, flag, call_wrapper);
1466}
1467
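// Jump to |fail| if |object| (which must be a heap object) does not have a
// string instance type.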
1468void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1469 Label* fail) {
1470 DCHECK(kNotStringTag != 0);
1471
1472 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1473 LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1474 mov(r0, Operand(kIsNotStringMask));
1475 AndP(r0, scratch);
1476 bne(fail);
1477}
1478
1479void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1480 Label* fail) {
1481 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1482 LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1483 CmpP(scratch, Operand(LAST_NAME_TYPE));
1484 bgt(fail);
1485}
1486
1487void MacroAssembler::DebugBreak() {
1488 LoadImmP(r2, Operand::Zero());
1489 mov(r3,
1490 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1491 CEntryStub ces(isolate(), 1);
1492 DCHECK(AllowThisStubCall(&ces));
1493 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1494}
1495
1496void MacroAssembler::PushStackHandler() {
1497 // Adjust this code if not the case.
1498 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1499 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1500
1501 // Link the current handler as the next handler.
1502 mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1503
1504 // Reserve room for the stack handler (StackHandlerConstants::kSize bytes).
1505 lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
1506
1507 // Copy the old handler into the next handler slot.
1508 mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
1509 kPointerSize);
1510 // Set this new handler as the current one.
1511 StoreP(sp, MemOperand(r7));
1512}
1513
1514void MacroAssembler::PopStackHandler() {
1515 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1516 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1517
1518 // Pop the Next Handler into r3 and store it into Handler Address reference.
1519 Pop(r3);
1520 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1521
1522 StoreP(r3, MemOperand(ip));
1523}
1524
1525void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1526 Register scratch, Label* miss) {
1527 Label same_contexts;
1528
1529 DCHECK(!holder_reg.is(scratch));
1530 DCHECK(!holder_reg.is(ip));
1531 DCHECK(!scratch.is(ip));
1532
1533 // Load current lexical context from the active StandardFrame, which
1534 // may require crawling past STUB frames.
1535 Label load_context;
1536 Label has_context;
1537 DCHECK(!ip.is(scratch));
1538 LoadRR(ip, fp);
1539 bind(&load_context);
1540 LoadP(scratch,
1541 MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
1542 JumpIfNotSmi(scratch, &has_context);
1543 LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
1544 b(&load_context);
1545 bind(&has_context);
1546
1547// In debug mode, make sure the lexical context is set.
1548#ifdef DEBUG
1549 CmpP(scratch, Operand::Zero());
1550 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1551#endif
1552
1553 // Load the native context of the current context.
1554 LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
1555
1556 // Check the context is a native context.
1557 if (emit_debug_code()) {
1558 // Cannot use ip as a temporary in this verification code, because ip is
1559 // clobbered as part of cmp with an object Operand.
1560 push(holder_reg); // Temporarily save holder on the stack.
1561 // Read the first word and compare to the native_context_map.
1562 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1563 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
1564 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1565 pop(holder_reg); // Restore holder.
1566 }
1567
1568 // Check if both contexts are the same.
1569 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1570 CmpP(scratch, ip);
1571 beq(&same_contexts, Label::kNear);
1572
1573 // Check the context is a native context.
1574 if (emit_debug_code()) {
1575 // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1576 // Cannot use ip as a temporary in this verification code, because ip is
1577 // clobbered as part of cmp with an object Operand.
1578 push(holder_reg); // Temporarily save holder on the stack.
1579 LoadRR(holder_reg, ip); // Move ip to its holding place.
1580 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
1581 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1582
1583 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1584 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
1585 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1586 // Restoring ip is not needed; ip is reloaded below.
1587 pop(holder_reg); // Restore holder.
1588 // Restore ip to holder's context.
1589 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1590 }
1591
1592 // Check that the security token in the calling global object is
1593 // compatible with the security token in the receiving global
1594 // object.
1595 int token_offset =
1596 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1597
1598 LoadP(scratch, FieldMemOperand(scratch, token_offset));
1599 LoadP(ip, FieldMemOperand(ip, token_offset));
1600 CmpP(scratch, ip);
1601 bne(miss);
1602
1603 bind(&same_contexts);
1604}
1605
1606// Compute the hash code from the untagged key. This must be kept in sync with
1607// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1608// code-stubs-hydrogen.cc
1609void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1610 // First of all we assign the hash seed to scratch.
1611 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1612 SmiUntag(scratch);
1613
1614 // Xor original key with a seed.
1615 XorP(t0, scratch);
1616
1617 // Compute the hash code from the untagged key. This must be kept in sync
1618 // with ComputeIntegerHash in utils.h.
1619 //
1620 // hash = ~hash + (hash << 15);
1621 LoadRR(scratch, t0);
1622 NotP(scratch);
1623 sll(t0, Operand(15));
1624 AddP(t0, scratch, t0);
1625 // hash = hash ^ (hash >> 12);
1626 ShiftRight(scratch, t0, Operand(12));
1627 XorP(t0, scratch);
1628 // hash = hash + (hash << 2);
1629 ShiftLeft(scratch, t0, Operand(2));
1630 AddP(t0, t0, scratch);
1631 // hash = hash ^ (hash >> 4);
1632 ShiftRight(scratch, t0, Operand(4));
1633 XorP(t0, scratch);
1634 // hash = hash * 2057;
1635 LoadRR(r0, t0);
1636 ShiftLeft(scratch, t0, Operand(3));
1637 AddP(t0, t0, scratch);
1638 ShiftLeft(scratch, r0, Operand(11));
1639 AddP(t0, t0, scratch);
1640 // hash = hash ^ (hash >> 16);
1641 ShiftRight(scratch, t0, Operand(16));
1642 XorP(t0, scratch);
1643 // hash & 0x3fffffff
1644 ExtractBitRange(t0, t0, 29, 0);
1645}
1646
1647void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
1648 Register key, Register result,
1649 Register t0, Register t1,
1650 Register t2) {
1651 // Register use:
1652 //
1653 // elements - holds the slow-case elements of the receiver on entry.
1654 // Unchanged unless 'result' is the same register.
1655 //
1656 // key - holds the smi key on entry.
1657 // Unchanged unless 'result' is the same register.
1658 //
1659 // result - holds the result on exit if the load succeeded.
1660 // Allowed to be the same as 'key' or 'elements'.
1661 // Unchanged on bailout so 'key' or 'result' can be used
1662 // in further computation.
1663 //
1664 // Scratch registers:
1665 //
1666 // t0 - holds the untagged key on entry and holds the hash once computed.
1667 //
1668 // t1 - used to hold the capacity mask of the dictionary
1669 //
1670 // t2 - used for the index into the dictionary.
1671 Label done;
1672
1673 GetNumberHash(t0, t1);
1674
1675 // Compute the capacity mask.
1676 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1677 SmiUntag(t1);
1678 SubP(t1, Operand(1));
1679
1680 // Generate an unrolled loop that performs a few probes before giving up.
1681 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1682 // Use t2 for index calculations and keep the hash intact in t0.
1683 LoadRR(t2, t0);
1684 // Compute the masked index: (hash + i + i * i) & mask.
1685 if (i > 0) {
1686 AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1687 }
1688 AndP(t2, t1);
1689
1690 // Scale the index by multiplying by the element size.
1691 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1692 LoadRR(ip, t2);
1693 sll(ip, Operand(1));
1694 AddP(t2, ip); // t2 = t2 * 3
1695
1696 // Check if the key is identical to the name.
1697 sll(t2, Operand(kPointerSizeLog2));
1698 AddP(t2, elements);
1699 LoadP(ip,
1700 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1701 CmpP(key, ip);
1702 if (i != kNumberDictionaryProbes - 1) {
1703 beq(&done, Label::kNear);
1704 } else {
1705 bne(miss);
1706 }
1707 }
1708
1709 bind(&done);
1710 // Check that the value is a field property.
1711 // t2: elements + (index * kPointerSize)
1712 const int kDetailsOffset =
1713 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1714 LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
1715 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
1716 DCHECK_EQ(DATA, 0);
1717 AndP(r0, ip, t1);
1718 bne(miss);
1719
1720 // Get the value at the masked, scaled index and return.
1721 const int kValueOffset =
1722 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1723 LoadP(result, FieldMemOperand(t2, kValueOffset));
1724}
1725
1726void MacroAssembler::Allocate(int object_size, Register result,
1727 Register scratch1, Register scratch2,
1728 Label* gc_required, AllocationFlags flags) {
1729 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1730 if (!FLAG_inline_new) {
1731 if (emit_debug_code()) {
1732 // Trash the registers to simulate an allocation failure.
1733 LoadImmP(result, Operand(0x7091));
1734 LoadImmP(scratch1, Operand(0x7191));
1735 LoadImmP(scratch2, Operand(0x7291));
1736 }
1737 b(gc_required);
1738 return;
1739 }
1740
1741 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1742
1743 // Make object size into bytes.
1744 if ((flags & SIZE_IN_WORDS) != 0) {
1745 object_size *= kPointerSize;
1746 }
1747 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1748
1749 // Check relative positions of allocation top and limit addresses.
1750 ExternalReference allocation_top =
1751 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1752 ExternalReference allocation_limit =
1753 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1754
1755 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1756 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1757 DCHECK((limit - top) == kPointerSize);
1758
1759 // Set up allocation top address register.
1760 Register top_address = scratch1;
1761 // This code stores a temporary value in ip. This is OK, as the code below
1762 // does not need ip for implicit literal generation.
1763 Register alloc_limit = ip;
1764 Register result_end = scratch2;
1765 mov(top_address, Operand(allocation_top));
1766
1767 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1768 // Load allocation top into result and allocation limit into ip.
1769 LoadP(result, MemOperand(top_address));
1770 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1771 } else {
1772 if (emit_debug_code()) {
1773 // Assert that result actually contains top on entry.
1774 LoadP(alloc_limit, MemOperand(top_address));
1775 CmpP(result, alloc_limit);
1776 Check(eq, kUnexpectedAllocationTop);
1777 }
1778 // Load allocation limit. Result already contains allocation top.
1779 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1780 }
1781
1782 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1783// Align the next allocation. Storing the filler map without checking top is
1784// safe in new-space because the limit of the heap is aligned there.
1785#if V8_TARGET_ARCH_S390X
1786 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1787#else
1788 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1789 AndP(result_end, result, Operand(kDoubleAlignmentMask));
1790 Label aligned;
1791 beq(&aligned);
1792 if ((flags & PRETENURE) != 0) {
1793 CmpLogicalP(result, alloc_limit);
1794 bge(gc_required);
1795 }
1796 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1797 StoreW(result_end, MemOperand(result));
1798 AddP(result, result, Operand(kDoubleSize / 2));
1799 bind(&aligned);
1800#endif
1801 }
1802
1803 // Calculate new top and bail out if new space is exhausted. Use result
1804 // to calculate the new top.
1805 SubP(r0, alloc_limit, result);
1806 if (is_int16(object_size)) {
1807 CmpP(r0, Operand(object_size));
1808 blt(gc_required);
1809 AddP(result_end, result, Operand(object_size));
1810 } else {
1811 mov(result_end, Operand(object_size));
1812 CmpP(r0, result_end);
1813 blt(gc_required);
1814 AddP(result_end, result, result_end);
1815 }
1816 StoreP(result_end, MemOperand(top_address));
1817
1818 // Tag object if requested.
1819 if ((flags & TAG_OBJECT) != 0) {
1820 AddP(result, result, Operand(kHeapObjectTag));
1821 }
1822}
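
// A scalar sketch of the bump-pointer fast path above (illustrative only):
//
//   top   = *top_address;
//   limit = *(top_address + kPointerSize);
//   if (limit - top < object_size) goto gc_required;
//   *top_address = top + object_size;
//   result = top;                        // plus kHeapObjectTag if TAG_OBJECT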
1823
1824void MacroAssembler::Allocate(Register object_size, Register result,
1825 Register result_end, Register scratch,
1826 Label* gc_required, AllocationFlags flags) {
1827 if (!FLAG_inline_new) {
1828 if (emit_debug_code()) {
1829 // Trash the registers to simulate an allocation failure.
1830 LoadImmP(result, Operand(0x7091));
1831 LoadImmP(scratch, Operand(0x7191));
1832 LoadImmP(result_end, Operand(0x7291));
1833 }
1834 b(gc_required);
1835 return;
1836 }
1837
1838 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1839 // is not specified. Other registers must not overlap.
1840 DCHECK(!AreAliased(object_size, result, scratch, ip));
1841 DCHECK(!AreAliased(result_end, result, scratch, ip));
1842 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1843
1844 // Check relative positions of allocation top and limit addresses.
1845 ExternalReference allocation_top =
1846 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1847 ExternalReference allocation_limit =
1848 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1849 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1850 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1851 DCHECK((limit - top) == kPointerSize);
1852
1853 // Set up allocation top address and allocation limit registers.
1854 Register top_address = scratch;
1855 // This code stores a temporary value in ip. This is OK, as the code below
1856 // does not need ip for implicit literal generation.
1857 Register alloc_limit = ip;
1858 mov(top_address, Operand(allocation_top));
1859
1860 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1861    // Load allocation top into result and allocation limit into alloc_limit.
1862 LoadP(result, MemOperand(top_address));
1863 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1864 } else {
1865 if (emit_debug_code()) {
1866 // Assert that result actually contains top on entry.
1867 LoadP(alloc_limit, MemOperand(top_address));
1868 CmpP(result, alloc_limit);
1869 Check(eq, kUnexpectedAllocationTop);
1870 }
1871 // Load allocation limit. Result already contains allocation top.
1872 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1873 }
1874
1875 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1876// Align the next allocation. Storing the filler map without checking top is
1877// safe in new-space because the limit of the heap is aligned there.
1878#if V8_TARGET_ARCH_S390X
1879 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1880#else
1881 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1882 AndP(result_end, result, Operand(kDoubleAlignmentMask));
1883 Label aligned;
1884 beq(&aligned);
1885 if ((flags & PRETENURE) != 0) {
1886 CmpLogicalP(result, alloc_limit);
1887 bge(gc_required);
1888 }
1889 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1890 StoreW(result_end, MemOperand(result));
1891 AddP(result, result, Operand(kDoubleSize / 2));
1892 bind(&aligned);
1893#endif
1894 }
1895
1896 // Calculate new top and bail out if new space is exhausted. Use result
1897 // to calculate the new top. Object size may be in words so a shift is
1898 // required to get the number of bytes.
1899 SubP(r0, alloc_limit, result);
1900 if ((flags & SIZE_IN_WORDS) != 0) {
1901 ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
1902 CmpP(r0, result_end);
1903 blt(gc_required);
1904 AddP(result_end, result, result_end);
1905 } else {
1906 CmpP(r0, object_size);
1907 blt(gc_required);
1908 AddP(result_end, result, object_size);
1909 }
1910
1911 // Update allocation top. result temporarily holds the new top.
1912 if (emit_debug_code()) {
1913 AndP(r0, result_end, Operand(kObjectAlignmentMask));
1914 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1915 }
1916 StoreP(result_end, MemOperand(top_address));
1917
1918 // Tag object if requested.
1919 if ((flags & TAG_OBJECT) != 0) {
1920 AddP(result, result, Operand(kHeapObjectTag));
1921 }
1922}
1923
1924void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1925 Register scratch1, Register scratch2,
1926 Register scratch3,
1927 Label* gc_required) {
1928 // Calculate the number of bytes needed for the characters in the string while
1929 // observing object alignment.
1930 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1931
1932 ShiftLeftP(scratch1, length, Operand(1)); // Length in bytes, not chars.
1933 AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1934
1935 AndP(scratch1, Operand(~kObjectAlignmentMask));
1936
1937 // Allocate two-byte string in new space.
1938 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1939
1940 // Set the map, length and hash field.
1941 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1942 scratch2);
1943}
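
// The size computation above, written out as a scalar sketch:
//
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;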
1944
1945void MacroAssembler::AllocateOneByteString(Register result, Register length,
1946 Register scratch1, Register scratch2,
1947 Register scratch3,
1948 Label* gc_required) {
1949 // Calculate the number of bytes needed for the characters in the string while
1950 // observing object alignment.
1951 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1952 DCHECK(kCharSize == 1);
1953 AddP(scratch1, length,
1954 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1955 AndP(scratch1, Operand(~kObjectAlignmentMask));
1956
1957 // Allocate one-byte string in new space.
1958 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1959
1960 // Set the map, length and hash field.
1961 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1962 scratch1, scratch2);
1963}
1964
1965void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
1966 Register scratch1,
1967 Register scratch2,
1968 Label* gc_required) {
1969 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1970 TAG_OBJECT);
1971
1972 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
1973 scratch2);
1974}
1975
1976void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1977 Register scratch1,
1978 Register scratch2,
1979 Label* gc_required) {
1980 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1981 TAG_OBJECT);
1982
1983 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1984 scratch1, scratch2);
1985}
1986
1987void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1988 Register length,
1989 Register scratch1,
1990 Register scratch2,
1991 Label* gc_required) {
1992 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1993 TAG_OBJECT);
1994
1995 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
1996 scratch2);
1997}
1998
1999void MacroAssembler::AllocateOneByteSlicedString(Register result,
2000 Register length,
2001 Register scratch1,
2002 Register scratch2,
2003 Label* gc_required) {
2004 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2005 TAG_OBJECT);
2006
2007 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2008 scratch1, scratch2);
2009}
2010
2011void MacroAssembler::CompareObjectType(Register object, Register map,
2012 Register type_reg, InstanceType type) {
2013 const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
2014
2015 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2016 CompareInstanceType(map, temp, type);
2017}
2018
2019void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
2020 InstanceType type) {
2021 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2022 STATIC_ASSERT(LAST_TYPE < 256);
2023 LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2024 CmpP(type_reg, Operand(type));
2025}
2026
2027void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
2028 CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
2029}
2030
2031void MacroAssembler::CheckFastElements(Register map, Register scratch,
2032 Label* fail) {
2033 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2034 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2035 STATIC_ASSERT(FAST_ELEMENTS == 2);
2036 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2037 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
2038 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2039 Operand(Map::kMaximumBitField2FastHoleyElementValue));
2040 bgt(fail);
2041}
2042
2043void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
2044 Label* fail) {
2045 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2046 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2047 STATIC_ASSERT(FAST_ELEMENTS == 2);
2048 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2049 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2050 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2051 ble(fail);
2052 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2053 Operand(Map::kMaximumBitField2FastHoleyElementValue));
2054 bgt(fail);
2055}
2056
2057void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
2058 Label* fail) {
2059 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2060 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2061 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2062 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2063 bgt(fail);
2064}
2065
2066void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2067 SmiUntag(ip, smi);
2068 ConvertIntToDouble(ip, value);
2069}

2070void MacroAssembler::StoreNumberToDoubleElements(
2071 Register value_reg, Register key_reg, Register elements_reg,
2072 Register scratch1, DoubleRegister double_scratch, Label* fail,
2073 int elements_offset) {
2074 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
2075 Label smi_value, store;
2076
2077 // Handle smi values specially.
2078 JumpIfSmi(value_reg, &smi_value);
2079
2080 // Ensure that the object is a heap number
2081 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
2082 DONT_DO_SMI_CHECK);
2083
2084 LoadDouble(double_scratch,
2085 FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2086 // Force a canonical NaN.
2087 CanonicalizeNaN(double_scratch);
2088 b(&store);
2089
2090 bind(&smi_value);
2091 SmiToDouble(double_scratch, value_reg);
2092
2093 bind(&store);
2094 SmiToDoubleArrayOffset(scratch1, key_reg);
2095 StoreDouble(double_scratch,
2096 FieldMemOperand(elements_reg, scratch1,
2097 FixedDoubleArray::kHeaderSize - elements_offset));
2098}
2099
2100void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2101 Register right,
2102 Register overflow_dst,
2103 Register scratch) {
2104 DCHECK(!dst.is(overflow_dst));
2105 DCHECK(!dst.is(scratch));
2106 DCHECK(!overflow_dst.is(scratch));
2107 DCHECK(!overflow_dst.is(left));
2108 DCHECK(!overflow_dst.is(right));
2109
2110 // TODO(joransiu): Optimize paths for left == right.
2111 bool left_is_right = left.is(right);
2112
2113  // C = A+B; C overflows if A and B have the same sign but C's sign differs.
2114 if (dst.is(left)) {
2115 LoadRR(scratch, left); // Preserve left.
2116 AddP(dst, left, right); // Left is overwritten.
2117 XorP(overflow_dst, scratch, dst); // Original left.
2118 if (!left_is_right) XorP(scratch, dst, right);
2119 } else if (dst.is(right)) {
2120 LoadRR(scratch, right); // Preserve right.
2121 AddP(dst, left, right); // Right is overwritten.
2122 XorP(overflow_dst, dst, left);
2123 if (!left_is_right) XorP(scratch, dst, scratch);
2124 } else {
2125 AddP(dst, left, right);
2126 XorP(overflow_dst, dst, left);
2127 if (!left_is_right) XorP(scratch, dst, right);
2128 }
2129 if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst);
2130 LoadAndTestRR(overflow_dst, overflow_dst);
2131}
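
// A scalar sketch of the check above: for C = A + B,
//
//   overflow = ((A ^ C) & (B ^ C)) < 0;
//
// i.e. overflow occurred iff A and B share a sign and C's sign differs.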
2132
2133void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2134 intptr_t right,
2135 Register overflow_dst,
2136 Register scratch) {
2137 DCHECK(!dst.is(overflow_dst));
2138 DCHECK(!dst.is(scratch));
2139 DCHECK(!overflow_dst.is(scratch));
2140 DCHECK(!overflow_dst.is(left));
2141
2142 mov(r1, Operand(right));
2143 AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch);
2144}
2145
2146void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
2147 Register right,
2148 Register overflow_dst,
2149 Register scratch) {
2150 DCHECK(!dst.is(overflow_dst));
2151 DCHECK(!dst.is(scratch));
2152 DCHECK(!overflow_dst.is(scratch));
2153 DCHECK(!overflow_dst.is(left));
2154 DCHECK(!overflow_dst.is(right));
2155
2156  // C = A-B; C overflows if A and B differ in sign and C's sign differs from A's.
2157 if (dst.is(left)) {
2158 LoadRR(scratch, left); // Preserve left.
2159 SubP(dst, left, right); // Left is overwritten.
2160 XorP(overflow_dst, dst, scratch);
2161 XorP(scratch, right);
2162 AndP(overflow_dst, scratch /*, SetRC*/);
2163 LoadAndTestRR(overflow_dst, overflow_dst);
2164 // Should be okay to remove rc
2165 } else if (dst.is(right)) {
2166 LoadRR(scratch, right); // Preserve right.
2167 SubP(dst, left, right); // Right is overwritten.
2168 XorP(overflow_dst, dst, left);
2169 XorP(scratch, left);
2170 AndP(overflow_dst, scratch /*, SetRC*/);
2171 LoadAndTestRR(overflow_dst, overflow_dst);
2172 // Should be okay to remove rc
2173 } else {
2174 SubP(dst, left, right);
2175 XorP(overflow_dst, dst, left);
2176 XorP(scratch, left, right);
2177 AndP(overflow_dst, scratch /*, SetRC*/);
2178 LoadAndTestRR(overflow_dst, overflow_dst);
2179 // Should be okay to remove rc
2180 }
2181}
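
// A scalar sketch of the check above: for C = A - B,
//
//   overflow = ((A ^ B) & (A ^ C)) < 0;
//
// i.e. overflow occurred iff A and B differ in sign and C's sign differs
// from A's.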
2182
2183void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2184 Label* early_success) {
2185 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2186 CompareMap(obj, map, early_success);
2187}
2188
2189void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2190 Label* early_success) {
2191 mov(r0, Operand(map));
2192 CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
2193}
2194
2195void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2196 Label* fail, SmiCheckType smi_check_type) {
2197 if (smi_check_type == DO_SMI_CHECK) {
2198 JumpIfSmi(obj, fail);
2199 }
2200
2201 Label success;
2202 CompareMap(obj, scratch, map, &success);
2203 bne(fail);
2204 bind(&success);
2205}
2206
2207void MacroAssembler::CheckMap(Register obj, Register scratch,
2208 Heap::RootListIndex index, Label* fail,
2209 SmiCheckType smi_check_type) {
2210 if (smi_check_type == DO_SMI_CHECK) {
2211 JumpIfSmi(obj, fail);
2212 }
2213 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2214 CompareRoot(scratch, index);
2215 bne(fail);
2216}
2217
2218void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2219 Register scratch2, Handle<WeakCell> cell,
2220 Handle<Code> success,
2221 SmiCheckType smi_check_type) {
2222 Label fail;
2223 if (smi_check_type == DO_SMI_CHECK) {
2224 JumpIfSmi(obj, &fail);
2225 }
2226 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2227 CmpWeakValue(scratch1, cell, scratch2);
2228 Jump(success, RelocInfo::CODE_TARGET, eq);
2229 bind(&fail);
2230}
2231
2232void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2233 Register scratch, CRegister) {
2234 mov(scratch, Operand(cell));
2235 CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset));
2236}
2237
2238void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2239 mov(value, Operand(cell));
2240 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2241}
2242
2243void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2244 Label* miss) {
2245 GetWeakValue(value, cell);
2246 JumpIfSmi(value, miss);
2247}
2248
2249void MacroAssembler::GetMapConstructor(Register result, Register map,
2250 Register temp, Register temp2) {
2251 Label done, loop;
2252 LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2253 bind(&loop);
2254 JumpIfSmi(result, &done);
2255 CompareObjectType(result, temp, temp2, MAP_TYPE);
2256 bne(&done);
2257 LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2258 b(&loop);
2259 bind(&done);
2260}
2261
2262void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2263 Register scratch, Label* miss) {
2264 // Get the prototype or initial map from the function.
2265 LoadP(result,
2266 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2267
2268 // If the prototype or initial map is the hole, don't return it and
2269 // simply miss the cache instead. This will allow us to allocate a
2270 // prototype object on-demand in the runtime system.
2271 CompareRoot(result, Heap::kTheHoleValueRootIndex);
2272 beq(miss);
2273
2274 // If the function does not have an initial map, we're done.
2275 Label done;
2276 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2277 bne(&done, Label::kNear);
2278
2279 // Get the prototype from the initial map.
2280 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2281
2282 // All done.
2283 bind(&done);
2284}
2285
2286void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2287 Condition cond) {
2288 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2289 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2290}
2291
2292void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2293 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2294}
2295
2296bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2297 return has_frame_ || !stub->SometimesSetsUpAFrame();
2298}
2299
2300void MacroAssembler::IndexFromHash(Register hash, Register index) {
2301  // If the hash field contains an array index, pick it out. The assert checks
2302  // that the constants for the maximum number of digits for an array index
2303  // cached in the hash field and the number of bits reserved for it do not
2304  // conflict.
2305 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2306 (1 << String::kArrayIndexValueBits));
2307 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2308}
2309
2310void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2311 Register scratch1, Register scratch2,
2312 DoubleRegister double_scratch) {
2313 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2314}
2315
2316void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
2317 Register scratch1,
2318 Register scratch2) {
2319 lgdr(scratch1, input);
2320#if V8_TARGET_ARCH_S390X
2321 llihf(scratch2, Operand(0x80000000)); // scratch2 = 0x80000000_00000000
2322 CmpP(scratch1, scratch2);
2323#else
2324 Label done;
2325 CmpP(scratch1, Operand::Zero());
2326 bne(&done, Label::kNear);
2327
2328 srlg(scratch1, scratch1, Operand(32));
2329 CmpP(scratch1, Operand(HeapNumber::kSignMask));
2330 bind(&done);
2331#endif
2332}
2333
2334void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
2335 stdy(input, MemOperand(sp, -kDoubleSize));
2336 LoadlW(scratch, MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
2337 Cmp32(scratch, Operand::Zero());
2338}
2339
2340void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
2341 LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset +
2342 Register::kExponentOffset));
2343 Cmp32(scratch, Operand::Zero());
2344}
2345
2346void MacroAssembler::TryDoubleToInt32Exact(Register result,
2347 DoubleRegister double_input,
2348 Register scratch,
2349 DoubleRegister double_scratch) {
2350 Label done;
2351 DCHECK(!double_input.is(double_scratch));
2352
2353 ConvertDoubleToInt64(double_input,
2354#if !V8_TARGET_ARCH_S390X
2355 scratch,
2356#endif
2357 result, double_scratch);
2358
2359#if V8_TARGET_ARCH_S390X
2360 TestIfInt32(result, r0);
2361#else
2362 TestIfInt32(scratch, result, r0);
2363#endif
2364 bne(&done);
2365
2366 // convert back and compare
2367 lgdr(scratch, double_scratch);
2368 cdfbr(double_scratch, scratch);
2369 cdbr(double_scratch, double_input);
2370 bind(&done);
2371}
2372
2373void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2374 Register input_high, Register scratch,
2375 DoubleRegister double_scratch, Label* done,
2376 Label* exact) {
2377 DCHECK(!result.is(input_high));
2378 DCHECK(!double_input.is(double_scratch));
2379 Label exception;
2380
2381 // Move high word into input_high
2382 StoreDouble(double_input, MemOperand(sp, -kDoubleSize));
2383 lay(sp, MemOperand(sp, -kDoubleSize));
2384 LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
2385 la(sp, MemOperand(sp, kDoubleSize));
2386
2387 // Test for NaN/Inf
2388 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2389 CmpLogicalP(result, Operand(0x7ff));
2390 beq(&exception);
2391
2392 // Convert (rounding to -Inf)
2393 ConvertDoubleToInt64(double_input,
2394#if !V8_TARGET_ARCH_S390X
2395 scratch,
2396#endif
2397 result, double_scratch, kRoundToMinusInf);
2398
2399// Test for overflow
2400#if V8_TARGET_ARCH_S390X
2401 TestIfInt32(result, r0);
2402#else
2403 TestIfInt32(scratch, result, r0);
2404#endif
2405 bne(&exception);
2406
2407 // Test for exactness
2408 lgdr(scratch, double_scratch);
2409 cdfbr(double_scratch, scratch);
2410 cdbr(double_scratch, double_input);
2411 beq(exact);
2412 b(done);
2413
2414 bind(&exception);
2415}
2416
2417void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2418 DoubleRegister double_input,
2419 Label* done) {
2420 DoubleRegister double_scratch = kScratchDoubleReg;
2421#if !V8_TARGET_ARCH_S390X
2422 Register scratch = ip;
2423#endif
2424
2425 ConvertDoubleToInt64(double_input,
2426#if !V8_TARGET_ARCH_S390X
2427 scratch,
2428#endif
2429 result, double_scratch);
2430
2431// Test for overflow
2432#if V8_TARGET_ARCH_S390X
2433 TestIfInt32(result, r0);
2434#else
2435 TestIfInt32(scratch, result, r0);
2436#endif
2437 beq(done);
2438}
2439
2440void MacroAssembler::TruncateDoubleToI(Register result,
2441 DoubleRegister double_input) {
2442 Label done;
2443
2444 TryInlineTruncateDoubleToI(result, double_input, &done);
2445
2446  // If we fell through, the inline version didn't succeed; call the stub instead.
2447 push(r14);
2448 // Put input on stack.
2449 StoreDouble(double_input, MemOperand(sp, -kDoubleSize));
2450 lay(sp, MemOperand(sp, -kDoubleSize));
2451
2452 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2453 CallStub(&stub);
2454
2455 la(sp, MemOperand(sp, kDoubleSize));
2456 pop(r14);
2457
2458 bind(&done);
2459}
2460
2461void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2462 Label done;
2463 DoubleRegister double_scratch = kScratchDoubleReg;
2464 DCHECK(!result.is(object));
2465
2466 LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2467 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2468
2469  // If we fell through, the inline version didn't succeed; call the stub instead.
2470 push(r14);
2471 DoubleToIStub stub(isolate(), object, result,
2472 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2473 CallStub(&stub);
2474 pop(r14);
2475
2476 bind(&done);
2477}
2478
2479void MacroAssembler::TruncateNumberToI(Register object, Register result,
2480 Register heap_number_map,
2481 Register scratch1, Label* not_number) {
2482 Label done;
2483 DCHECK(!result.is(object));
2484
2485 UntagAndJumpIfSmi(result, object, &done);
2486 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2487 TruncateHeapNumberToI(result, object);
2488
2489 bind(&done);
2490}
2491
2492void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2493 int num_least_bits) {
2494 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
2495    // We rotate by the kSmiShift amount and extract the num_least_bits low bits.
2496 risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
2497 Operand(64 - kSmiShift), true);
2498 } else {
2499 SmiUntag(dst, src);
2500 AndP(dst, Operand((1 << num_least_bits) - 1));
2501 }
2502}
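
// The risbg path above is a single-instruction form of the slow path below
// it; as a scalar sketch:
//
//   dst = (src >> kSmiShift) & ((1 << num_least_bits) - 1);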
2503
2504void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2505 int num_least_bits) {
2506 AndP(dst, src, Operand((1 << num_least_bits) - 1));
2507}
2508
2509void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2510 SaveFPRegsMode save_doubles) {
2511 // All parameters are on the stack. r2 has the return value after call.
2512
2513 // If the expected number of arguments of the runtime function is
2514  // constant, we check that the actual number of arguments matches the
2515 // expectation.
2516 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2517
2518 // TODO(1236192): Most runtime routines don't need the number of
2519 // arguments passed in because it is constant. At some point we
2520 // should remove this need and make the runtime routine entry code
2521 // smarter.
2522 mov(r2, Operand(num_arguments));
2523 mov(r3, Operand(ExternalReference(f, isolate())));
2524 CEntryStub stub(isolate(),
2525#if V8_TARGET_ARCH_S390X
2526 f->result_size,
2527#else
2528 1,
2529#endif
2530 save_doubles);
2531 CallStub(&stub);
2532}
2533
2534void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2535 int num_arguments) {
2536 mov(r2, Operand(num_arguments));
2537 mov(r3, Operand(ext));
2538
2539 CEntryStub stub(isolate(), 1);
2540 CallStub(&stub);
2541}
2542
2543void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2544 const Runtime::Function* function = Runtime::FunctionForId(fid);
2545 DCHECK_EQ(1, function->result_size);
2546 if (function->nargs >= 0) {
2547 mov(r2, Operand(function->nargs));
2548 }
2549 JumpToExternalReference(ExternalReference(fid, isolate()));
2550}
2551
2552void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2553 mov(r3, Operand(builtin));
2554 CEntryStub stub(isolate(), 1);
2555 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2556}
2557
2558void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2559 Register scratch1, Register scratch2) {
2560 if (FLAG_native_code_counters && counter->Enabled()) {
2561 mov(scratch1, Operand(value));
2562 mov(scratch2, Operand(ExternalReference(counter)));
2563 StoreW(scratch1, MemOperand(scratch2));
2564 }
2565}
2566
2567void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2568 Register scratch1, Register scratch2) {
2569 DCHECK(value > 0 && is_int8(value));
2570 if (FLAG_native_code_counters && counter->Enabled()) {
2571 mov(scratch1, Operand(ExternalReference(counter)));
2572 // @TODO(john.yan): can be optimized by asi()
2573 LoadW(scratch2, MemOperand(scratch1));
2574 AddP(scratch2, Operand(value));
2575 StoreW(scratch2, MemOperand(scratch1));
2576 }
2577}
2578
2579void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2580 Register scratch1, Register scratch2) {
2581 DCHECK(value > 0 && is_int8(value));
2582 if (FLAG_native_code_counters && counter->Enabled()) {
2583 mov(scratch1, Operand(ExternalReference(counter)));
2584 // @TODO(john.yan): can be optimized by asi()
2585 LoadW(scratch2, MemOperand(scratch1));
2586 AddP(scratch2, Operand(-value));
2587 StoreW(scratch2, MemOperand(scratch1));
2588 }
2589}
2590
2591void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2592 CRegister cr) {
2593 if (emit_debug_code()) Check(cond, reason, cr);
2594}
2595
2596void MacroAssembler::AssertFastElements(Register elements) {
2597 if (emit_debug_code()) {
2598 DCHECK(!elements.is(r0));
2599 Label ok;
2600 push(elements);
2601 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2602 CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
2603 beq(&ok, Label::kNear);
2604 CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
2605 beq(&ok, Label::kNear);
2606 CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
2607 beq(&ok, Label::kNear);
2608 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2609 bind(&ok);
2610 pop(elements);
2611 }
2612}
2613
2614void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2615 Label L;
2616 b(cond, &L);
2617 Abort(reason);
2618 // will not return here
2619 bind(&L);
2620}
2621
2622void MacroAssembler::Abort(BailoutReason reason) {
2623 Label abort_start;
2624 bind(&abort_start);
2625#ifdef DEBUG
2626 const char* msg = GetBailoutReason(reason);
2627 if (msg != NULL) {
2628 RecordComment("Abort message: ");
2629 RecordComment(msg);
2630 }
2631
2632 if (FLAG_trap_on_abort) {
2633 stop(msg);
2634 return;
2635 }
2636#endif
2637
2638 LoadSmiLiteral(r0, Smi::FromInt(reason));
2639 push(r0);
2640 // Disable stub call restrictions to always allow calls to abort.
2641 if (!has_frame_) {
2642 // We don't actually want to generate a pile of code for this, so just
2643 // claim there is a stack frame, without generating one.
2644 FrameScope scope(this, StackFrame::NONE);
2645 CallRuntime(Runtime::kAbort);
2646 } else {
2647 CallRuntime(Runtime::kAbort);
2648 }
2649 // will not return here
2650}
2651
2652void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2653 if (context_chain_length > 0) {
2654 // Move up the chain of contexts to the context containing the slot.
2655 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2656 for (int i = 1; i < context_chain_length; i++) {
2657 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2658 }
2659 } else {
2660 // Slot is in the current function context. Move it into the
2661 // destination register in case we store into it (the write barrier
2662    // cannot be allowed to destroy the context in cp).
2663 LoadRR(dst, cp);
2664 }
2665}
2666
2667void MacroAssembler::LoadTransitionedArrayMapConditional(
2668 ElementsKind expected_kind, ElementsKind transitioned_kind,
2669 Register map_in_out, Register scratch, Label* no_map_match) {
2670 DCHECK(IsFastElementsKind(expected_kind));
2671 DCHECK(IsFastElementsKind(transitioned_kind));
2672
2673 // Check that the function's map is the same as the expected cached map.
2674 LoadP(scratch, NativeContextMemOperand());
2675 LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2676 CmpP(map_in_out, ip);
2677 bne(no_map_match);
2678
2679 // Use the transitioned cached map.
2680 LoadP(map_in_out,
2681 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2682}
2683
2684void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2685 LoadP(dst, NativeContextMemOperand());
2686 LoadP(dst, ContextMemOperand(dst, index));
2687}
2688
2689void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2690 Register map,
2691 Register scratch) {
2692 // Load the initial map. The global functions all have initial maps.
2693 LoadP(map,
2694 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2695 if (emit_debug_code()) {
2696 Label ok, fail;
2697 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2698 b(&ok);
2699 bind(&fail);
2700 Abort(kGlobalFunctionsMustHaveInitialMap);
2701 bind(&ok);
2702 }
2703}
2704
2705void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2706 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2707 SubP(scratch, reg, Operand(1));
2708 CmpP(scratch, Operand::Zero());
2709 blt(not_power_of_two_or_zero);
2710 AndP(r0, reg, scratch /*, SetRC*/); // Should be okay to remove rc
2711 bne(not_power_of_two_or_zero /*, cr0*/);
2712}
2713
2714void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2715 Register scratch,
2716 Label* zero_and_neg,
2717 Label* not_power_of_two) {
2718 SubP(scratch, reg, Operand(1));
2719 CmpP(scratch, Operand::Zero());
2720 blt(zero_and_neg);
2721 AndP(r0, reg, scratch /*, SetRC*/); // Should be okay to remove rc
2722 bne(not_power_of_two /*, cr0*/);
2723}
2724
2725#if !V8_TARGET_ARCH_S390X
2726void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2727 DCHECK(!reg.is(overflow));
2728 LoadRR(overflow, reg); // Save original value.
2729 SmiTag(reg);
2730 XorP(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
2731 LoadAndTestRR(overflow, overflow);
2732}
2733
2734void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2735 Register overflow) {
2736 if (dst.is(src)) {
2737 // Fall back to slower case.
2738 SmiTagCheckOverflow(dst, overflow);
2739 } else {
2740 DCHECK(!dst.is(src));
2741 DCHECK(!dst.is(overflow));
2742 DCHECK(!src.is(overflow));
2743 SmiTag(dst, src);
2744 XorP(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
2745 LoadAndTestRR(overflow, overflow);
2746 }
2747}
2748#endif
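
// A scalar sketch of the 31-bit smi overflow test above: tagging is a left
// shift by one, so
//
//   tagged   = value << 1;
//   overflow = (value ^ tagged) < 0;   // set iff 'value' does not fit in 31 bits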
2749
2750void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2751 Label* on_not_both_smi) {
2752 STATIC_ASSERT(kSmiTag == 0);
2753 OrP(r0, reg1, reg2 /*, LeaveRC*/); // should be okay to remove LeaveRC
2754 JumpIfNotSmi(r0, on_not_both_smi);
2755}
2756
2757void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2758 Label* smi_case) {
2759 STATIC_ASSERT(kSmiTag == 0);
2760 STATIC_ASSERT(kSmiTagSize == 1);
2761  // This won't work if src == dst.
2762 DCHECK(src.code() != dst.code());
2763 SmiUntag(dst, src);
2764 TestIfSmi(src);
2765 beq(smi_case);
2766}
2767
2768void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2769 Label* non_smi_case) {
2770 STATIC_ASSERT(kSmiTag == 0);
2771 STATIC_ASSERT(kSmiTagSize == 1);
2772
2773  // We can more optimally use TestIfSmi if dst != src;
2774  // otherwise, the untag operation will kill the condition code and we
2775  // cannot test the tag bit.
2776 if (src.code() != dst.code()) {
2777 SmiUntag(dst, src);
2778 TestIfSmi(src);
2779 } else {
2780 TestBit(src, 0, r0);
2781 SmiUntag(dst, src);
2782 LoadAndTestRR(r0, r0);
2783 }
2784 bne(non_smi_case);
2785}
2786
2787void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2788 Label* on_either_smi) {
2789 STATIC_ASSERT(kSmiTag == 0);
2790 JumpIfSmi(reg1, on_either_smi);
2791 JumpIfSmi(reg2, on_either_smi);
2792}
2793
2794void MacroAssembler::AssertNotNumber(Register object) {
2795 if (emit_debug_code()) {
2796 STATIC_ASSERT(kSmiTag == 0);
2797 TestIfSmi(object);
2798 Check(ne, kOperandIsANumber, cr0);
2799 push(object);
2800 CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2801 pop(object);
2802 Check(ne, kOperandIsANumber);
2803 }
2804}
2805
2806void MacroAssembler::AssertNotSmi(Register object) {
2807 if (emit_debug_code()) {
2808 STATIC_ASSERT(kSmiTag == 0);
2809 TestIfSmi(object);
2810 Check(ne, kOperandIsASmi, cr0);
2811 }
2812}
2813
2814void MacroAssembler::AssertSmi(Register object) {
2815 if (emit_debug_code()) {
2816 STATIC_ASSERT(kSmiTag == 0);
2817 TestIfSmi(object);
2818 Check(eq, kOperandIsNotSmi, cr0);
2819 }
2820}
2821
2822void MacroAssembler::AssertString(Register object) {
2823 if (emit_debug_code()) {
2824 STATIC_ASSERT(kSmiTag == 0);
2825 TestIfSmi(object);
2826 Check(ne, kOperandIsASmiAndNotAString, cr0);
2827 push(object);
2828 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2829 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2830 pop(object);
2831 Check(lt, kOperandIsNotAString);
2832 }
2833}
2834
2835void MacroAssembler::AssertName(Register object) {
2836 if (emit_debug_code()) {
2837 STATIC_ASSERT(kSmiTag == 0);
2838 TestIfSmi(object);
2839 Check(ne, kOperandIsASmiAndNotAName, cr0);
2840 push(object);
2841 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2842 CompareInstanceType(object, object, LAST_NAME_TYPE);
2843 pop(object);
2844 Check(le, kOperandIsNotAName);
2845 }
2846}
2847
2848void MacroAssembler::AssertFunction(Register object) {
2849 if (emit_debug_code()) {
2850 STATIC_ASSERT(kSmiTag == 0);
2851 TestIfSmi(object);
2852 Check(ne, kOperandIsASmiAndNotAFunction, cr0);
2853 push(object);
2854 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2855 pop(object);
2856 Check(eq, kOperandIsNotAFunction);
2857 }
2858}
2859
2860void MacroAssembler::AssertBoundFunction(Register object) {
2861 if (emit_debug_code()) {
2862 STATIC_ASSERT(kSmiTag == 0);
2863 TestIfSmi(object);
2864 Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
2865 push(object);
2866 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2867 pop(object);
2868 Check(eq, kOperandIsNotABoundFunction);
2869 }
2870}
2871
2872void MacroAssembler::AssertReceiver(Register object) {
2873 if (emit_debug_code()) {
2874 STATIC_ASSERT(kSmiTag == 0);
2875 TestIfSmi(object);
2876 Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
2877 push(object);
2878 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2879 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
2880 pop(object);
2881 Check(ge, kOperandIsNotAReceiver);
2882 }
2883}
2884
2885void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2886 Register scratch) {
2887 if (emit_debug_code()) {
2888 Label done_checking;
2889 AssertNotSmi(object);
2890 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2891 beq(&done_checking, Label::kNear);
2892 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2893 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2894 Assert(eq, kExpectedUndefinedOrCell);
2895 bind(&done_checking);
2896 }
2897}
2898
2899void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2900 if (emit_debug_code()) {
2901 CompareRoot(reg, index);
2902 Check(eq, kHeapNumberMapRegisterClobbered);
2903 }
2904}
2905
2906void MacroAssembler::JumpIfNotHeapNumber(Register object,
2907 Register heap_number_map,
2908 Register scratch,
2909 Label* on_not_heap_number) {
2910 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2911 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2912 CmpP(scratch, heap_number_map);
2913 bne(on_not_heap_number);
2914}
2915
2916void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
2917 Register first, Register second, Register scratch1, Register scratch2,
2918 Label* failure) {
2919 // Test that both first and second are sequential one-byte strings.
2920 // Assume that they are non-smis.
2921 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2922 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2923 LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2924 LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2925
2926 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
2927 scratch2, failure);
2928}
2929
2930void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
2931 Register second,
2932 Register scratch1,
2933 Register scratch2,
2934 Label* failure) {
2935 // Check that neither is a smi.
2936 AndP(scratch1, first, second);
2937 JumpIfSmi(scratch1, failure);
2938 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
2939 scratch2, failure);
2940}
2941
2942void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2943 Label* not_unique_name) {
2944 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2945 Label succeed;
2946 AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2947 beq(&succeed, Label::kNear);
2948 CmpP(reg, Operand(SYMBOL_TYPE));
2949 bne(not_unique_name);
2950
2951 bind(&succeed);
2952}
2953
2954// Allocates a heap number or jumps to the gc_required label if the young
2955// space is full and a scavenge is needed.
2956void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
2957 Register scratch2,
2958 Register heap_number_map,
2959 Label* gc_required,
2960 TaggingMode tagging_mode,
2961 MutableMode mode) {
2962 // Allocate an object in the heap for the heap number and tag it as a heap
2963 // object.
2964 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
2965 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
2966
2967 Heap::RootListIndex map_index = mode == MUTABLE
2968 ? Heap::kMutableHeapNumberMapRootIndex
2969 : Heap::kHeapNumberMapRootIndex;
2970 AssertIsRoot(heap_number_map, map_index);
2971
2972 // Store heap number map in the allocated object.
2973 if (tagging_mode == TAG_RESULT) {
2974 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2975 } else {
2976 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
2977 }
2978}
2979
2980void MacroAssembler::AllocateHeapNumberWithValue(
2981 Register result, DoubleRegister value, Register scratch1, Register scratch2,
2982 Register heap_number_map, Label* gc_required) {
2983 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2984 StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2985}
2986
2987void MacroAssembler::AllocateJSValue(Register result, Register constructor,
2988 Register value, Register scratch1,
2989 Register scratch2, Label* gc_required) {
2990 DCHECK(!result.is(constructor));
2991 DCHECK(!result.is(scratch1));
2992 DCHECK(!result.is(scratch2));
2993 DCHECK(!result.is(value));
2994
2995 // Allocate JSValue in new space.
2996 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
2997
2998 // Initialize the JSValue.
2999 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3000 StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
3001 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3002 StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
3003 StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
3004 StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
3005 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3006}
3007
3008void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
3009 Register scratch) {
3010 Label big_loop, left_bytes, done, fake_call;
3011
3012 DCHECK(!scratch.is(r0));
3013
3014 // big loop moves 256 bytes at a time
3015 bind(&big_loop);
3016 CmpP(length, Operand(static_cast<intptr_t>(0x100)));
3017 blt(&left_bytes);
3018
3019 mvc(MemOperand(dst), MemOperand(src), 0x100);
3020
3021 AddP(src, Operand(static_cast<intptr_t>(0x100)));
3022 AddP(dst, Operand(static_cast<intptr_t>(0x100)));
3023 SubP(length, Operand(static_cast<intptr_t>(0x100)));
3024 b(&big_loop);
3025
3026 bind(&left_bytes);
3027 CmpP(length, Operand::Zero());
3028 beq(&done);
3029
3030  // TODO(john.yan): A more optimal version is to use MVC via EX, but the
3031  // commented-out sequence below has an undiagnosed issue.
3032 /*
3033 b(scratch, &fake_call); // use brasl to Save mvc addr to scratch
3034 mvc(MemOperand(dst), MemOperand(src), 1);
3035 bind(&fake_call);
3036 SubP(length, Operand(static_cast<intptr_t>(-1)));
3037 ex(length, MemOperand(scratch)); // execute mvc instr above
3038 AddP(src, length);
3039 AddP(dst, length);
3040 AddP(src, Operand(static_cast<intptr_t>(0x1)));
3041 AddP(dst, Operand(static_cast<intptr_t>(0x1)));
3042 */
3043
3044 mvc(MemOperand(dst), MemOperand(src), 1);
3045 AddP(src, Operand(static_cast<intptr_t>(0x1)));
3046 AddP(dst, Operand(static_cast<intptr_t>(0x1)));
3047 SubP(length, Operand(static_cast<intptr_t>(0x1)));
3048
3049 b(&left_bytes);
3050 bind(&done);
3051}
3052
3053void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
3054 Register count,
3055 Register filler) {
3056 Label loop;
3057 bind(&loop);
3058 StoreP(filler, MemOperand(current_address));
3059 AddP(current_address, current_address, Operand(kPointerSize));
3060 BranchOnCount(r1, &loop);
3061}
3062
3063void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
3064 Register end_address,
3065 Register filler) {
3066 Label done;
3067 DCHECK(!filler.is(r1));
3068 DCHECK(!current_address.is(r1));
3069 DCHECK(!end_address.is(r1));
3070 SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/);
3071 beq(&done, Label::kNear);
3072 ShiftRightP(r1, r1, Operand(kPointerSizeLog2));
3073 InitializeNFieldsWithFiller(current_address, r1, filler);
3074 bind(&done);
3075}
3076
3077void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3078 Register first, Register second, Register scratch1, Register scratch2,
3079 Label* failure) {
3080 const int kFlatOneByteStringMask =
3081 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3082 const int kFlatOneByteStringTag =
3083 kStringTag | kOneByteStringTag | kSeqStringTag;
3084 if (!scratch1.is(first)) LoadRR(scratch1, first);
3085 if (!scratch2.is(second)) LoadRR(scratch2, second);
3086 nilf(scratch1, Operand(kFlatOneByteStringMask));
3087 CmpP(scratch1, Operand(kFlatOneByteStringTag));
3088 bne(failure);
3089 nilf(scratch2, Operand(kFlatOneByteStringMask));
3090 CmpP(scratch2, Operand(kFlatOneByteStringTag));
3091 bne(failure);
3092}
3093
3094void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3095 Register scratch,
3096 Label* failure) {
3097 const int kFlatOneByteStringMask =
3098 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3099 const int kFlatOneByteStringTag =
3100 kStringTag | kOneByteStringTag | kSeqStringTag;
3101
3102 if (!scratch.is(type)) LoadRR(scratch, type);
3103 nilf(scratch, Operand(kFlatOneByteStringMask));
3104 CmpP(scratch, Operand(kFlatOneByteStringTag));
3105 bne(failure);
3106}
3107
3108static const int kRegisterPassedArguments = 5;
3109
3110int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3111 int num_double_arguments) {
3112 int stack_passed_words = 0;
3113 if (num_double_arguments > DoubleRegister::kNumRegisters) {
3114 stack_passed_words +=
3115 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3116 }
3117 // Up to five simple arguments are passed in registers r2..r6
3118 if (num_reg_arguments > kRegisterPassedArguments) {
3119 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3120 }
3121 return stack_passed_words;
3122}
3123
3124void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
3125 Register value,
3126 uint32_t encoding_mask) {
3127 Label is_object;
3128 TestIfSmi(string);
3129 Check(ne, kNonObject, cr0);
3130
3131 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3132 LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3133
3134 AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3135 CmpP(ip, Operand(encoding_mask));
3136 Check(eq, kUnexpectedStringType);
3137
3138// The index is assumed to come in untagged. Tag it to compare with the
3139// string length without using a temp register; it is restored at the end of
3140// this function.
3141#if !V8_TARGET_ARCH_S390X
3142 Label index_tag_ok, index_tag_bad;
3143 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
3144#endif
3145 SmiTag(index, index);
3146#if !V8_TARGET_ARCH_S390X
3147 b(&index_tag_ok);
3148 bind(&index_tag_bad);
3149 Abort(kIndexIsTooLarge);
3150 bind(&index_tag_ok);
3151#endif
3152
3153 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
3154 CmpP(index, ip);
3155 Check(lt, kIndexIsTooLarge);
3156
3157 DCHECK(Smi::FromInt(0) == 0);
3158 CmpP(index, Operand::Zero());
3159 Check(ge, kIndexIsNegative);
3160
3161 SmiUntag(index, index);
3162}
3163
3164void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3165 int num_double_arguments,
3166 Register scratch) {
3167 int frame_alignment = ActivationFrameAlignment();
3168 int stack_passed_arguments =
3169 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3170 int stack_space = kNumRequiredStackFrameSlots;
3171 if (frame_alignment > kPointerSize) {
3172 // Make stack end at alignment and make room for stack arguments
3173 // -- preserving original value of sp.
3174 LoadRR(scratch, sp);
3175 lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
3176 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3177 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3178 StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
3179 } else {
3180 stack_space += stack_passed_arguments;
3181 }
3182 lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
3183}
3184
3185void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3186 Register scratch) {
3187 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3188}
3189
3190void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
3191
3192void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
3193
3194void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3195 DoubleRegister src2) {
3196 if (src2.is(d0)) {
3197 DCHECK(!src1.is(d2));
3198 Move(d2, src2);
3199 Move(d0, src1);
3200 } else {
3201 Move(d0, src1);
3202 Move(d2, src2);
3203 }
3204}
3205
3206void MacroAssembler::CallCFunction(ExternalReference function,
3207 int num_reg_arguments,
3208 int num_double_arguments) {
3209 mov(ip, Operand(function));
3210 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3211}
3212
3213void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3214 int num_double_arguments) {
3215 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3216}
3217
3218void MacroAssembler::CallCFunction(ExternalReference function,
3219 int num_arguments) {
3220 CallCFunction(function, num_arguments, 0);
3221}
3222
3223void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3224 CallCFunction(function, num_arguments, 0);
3225}
3226
3227void MacroAssembler::CallCFunctionHelper(Register function,
3228 int num_reg_arguments,
3229 int num_double_arguments) {
3230 DCHECK(has_frame());
3231
3232 // Just call directly. The function called cannot cause a GC, or
3233 // allow preemption, so the return address in the link register
3234 // stays correct.
3235 Register dest = function;
3236 if (ABI_CALL_VIA_IP) {
3237 Move(ip, function);
3238 dest = ip;
3239 }
3240
3241 Call(dest);
3242
3243 int stack_passed_arguments =
3244 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3245 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3246 if (ActivationFrameAlignment() > kPointerSize) {
3247 // Load the original stack pointer (pre-alignment) from the stack
3248 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3249 } else {
3250 la(sp, MemOperand(sp, stack_space * kPointerSize));
3251 }
3252}
3253
3254void MacroAssembler::CheckPageFlag(
3255 Register object,
3256 Register scratch, // scratch may be same register as object
3257 int mask, Condition cc, Label* condition_met) {
3258 DCHECK(cc == ne || cc == eq);
3259 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3260
3261 if (base::bits::IsPowerOfTwo32(mask)) {
3262 // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
3263 // which allows testing of a single byte in memory.
3264 int32_t byte_offset = 4;
3265 uint32_t shifted_mask = mask;
3266 // Determine the byte offset to be tested
3267 if (mask <= 0x80) {
3268 byte_offset = kPointerSize - 1;
3269 } else if (mask < 0x8000) {
3270 byte_offset = kPointerSize - 2;
3271 shifted_mask = mask >> 8;
3272 } else if (mask < 0x800000) {
3273 byte_offset = kPointerSize - 3;
3274 shifted_mask = mask >> 16;
3275 } else {
3276 byte_offset = kPointerSize - 4;
3277 shifted_mask = mask >> 24;
3278 }
3279#if V8_TARGET_LITTLE_ENDIAN
3280 // Reverse the byte_offset if emulating on little endian platform
3281 byte_offset = kPointerSize - byte_offset - 1;
3282#endif
3283 tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
3284 Operand(shifted_mask));
3285 } else {
3286 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3287 AndP(r0, scratch, Operand(mask));
3288 }
3289 // Should be okay to remove rc
3290
3291 if (cc == ne) {
3292 bne(condition_met);
3293 }
3294 if (cc == eq) {
3295 beq(condition_met);
3296 }
3297}
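
// A scalar sketch of the single-byte test above (illustrative only): the page
// flags are a word at MemoryChunk::kFlagsOffset, and (byte_offset,
// shifted_mask) are chosen so that testing that one byte under the shifted
// mask is equivalent to testing 'flags & mask':
//
//   uint8_t* flags = reinterpret_cast<uint8_t*>(page + MemoryChunk::kFlagsOffset);
//   bool set = (flags[byte_offset] & shifted_mask) != 0;  // == ((flags_word & mask) != 0)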
3298
3299void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3300 Register scratch1, Label* on_black) {
3301 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
3302 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3303}
3304
3305void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3306 Register mask_scratch, Label* has_color,
3307 int first_bit, int second_bit) {
3308 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3309
3310 GetMarkBits(object, bitmap_scratch, mask_scratch);
3311
3312 Label other_color, word_boundary;
3313 LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3314 // Test the first bit
3315 AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
3316 b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
3317 // Shift left 1
3318 // May need to load the next cell
3319 sll(mask_scratch, Operand(1) /*, SetRC*/);
3320 LoadAndTest32(mask_scratch, mask_scratch);
3321 beq(&word_boundary, Label::kNear);
3322 // Test the second bit
3323 AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
3324 b(second_bit == 1 ? ne : eq, has_color);
3325 b(&other_color, Label::kNear);
3326
3327 bind(&word_boundary);
3328 LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3329 AndP(r0, ip, Operand(1));
3330 b(second_bit == 1 ? ne : eq, has_color);
3331 bind(&other_color);
3332}
3333
3334void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3335 Register mask_reg) {
3336 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3337 LoadRR(bitmap_reg, addr_reg);
3338 nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
3339 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3340 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3341 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3342 ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3343 AddP(bitmap_reg, ip);
3344 LoadRR(ip, mask_reg); // Have to do some funky reg shuffling as
3345 // 31-bit shift left clobbers on s390.
3346 LoadImmP(mask_reg, Operand(1));
3347 ShiftLeftP(mask_reg, mask_reg, ip);
3348}
3349
3350void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3351 Register mask_scratch, Register load_scratch,
3352 Label* value_is_white) {
3353 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3354 GetMarkBits(value, bitmap_scratch, mask_scratch);
3355
3356 // If the value is black or grey we don't need to do anything.
3357 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3358 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3359 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3360 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3361
3362 // Since both black and grey have a 1 in the first position and white does
3363 // not have a 1 there we only need to check one bit.
3364 LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3365 LoadRR(r0, load_scratch);
3366 AndP(r0, mask_scratch);
3367 beq(value_is_white);
3368}
3369
3370// Saturate a value into 8-bit unsigned integer
3371// if input_value < 0, output_value is 0
3372// if input_value > 255, output_value is 255
3373// otherwise output_value is the input_value
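// Roughly equivalent C logic (illustrative sketch only):
//   output = input < 0 ? 0 : (input > 255 ? 255 : input);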
3374void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3375 int satval = (1 << 8) - 1;
3376
3377 Label done, negative_label, overflow_label;
3378 CmpP(input_reg, Operand::Zero());
3379 blt(&negative_label);
3380
3381 CmpP(input_reg, Operand(satval));
3382 bgt(&overflow_label);
3383 if (!output_reg.is(input_reg)) {
3384 LoadRR(output_reg, input_reg);
3385 }
3386 b(&done);
3387
3388 bind(&negative_label);
3389 LoadImmP(output_reg, Operand::Zero()); // set to 0 if negative
3390 b(&done);
3391
3392 bind(&overflow_label); // set to satval if > satval
3393 LoadImmP(output_reg, Operand(satval));
3394
3395 bind(&done);
3396}
3397
3398void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3399 DoubleRegister input_reg,
3400 DoubleRegister double_scratch) {
3401 Label above_zero;
3402 Label done;
3403 Label in_bounds;
3404
3405 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3406 cdbr(input_reg, double_scratch);
3407 bgt(&above_zero, Label::kNear);
3408
3409  // Double value is <= 0 (including -Infinity) or NaN, return 0.
3410 LoadIntLiteral(result_reg, 0);
3411 b(&done, Label::kNear);
3412
3413  // Double value is > 255, return 255.
3414 bind(&above_zero);
3415 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3416 cdbr(input_reg, double_scratch);
3417 ble(&in_bounds, Label::kNear);
3418 LoadIntLiteral(result_reg, 255);
3419 b(&done, Label::kNear);
3420
3421 // In 0-255 range, round and truncate.
3422 bind(&in_bounds);
3423
3424 // round to nearest (default rounding mode)
3425 cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg);
3426 bind(&done);
3427}
3428
3429void MacroAssembler::LoadInstanceDescriptors(Register map,
3430 Register descriptors) {
3431 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3432}
3433
3434void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3435 LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3436 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3437}
3438
3439void MacroAssembler::EnumLength(Register dst, Register map) {
3440 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3441 LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3442 And(dst, Operand(Map::EnumLengthBits::kMask));
3443 SmiTag(dst);
3444}
3445
3446void MacroAssembler::LoadAccessor(Register dst, Register holder,
3447 int accessor_index,
3448 AccessorComponent accessor) {
3449 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3450 LoadInstanceDescriptors(dst, dst);
3451 LoadP(dst,
3452 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3453 const int getterOffset = AccessorPair::kGetterOffset;
3454 const int setterOffset = AccessorPair::kSetterOffset;
3455 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3456 LoadP(dst, FieldMemOperand(dst, offset));
3457}
3458
3459void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3460 Register null_value = r7;
3461 Register empty_fixed_array_value = r8;
3462 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3463 Label next, start;
3464 LoadRR(r4, r2);
3465
3466 // Check if the enum length field is properly initialized, indicating that
3467 // there is an enum cache.
3468 LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3469
3470 EnumLength(r5, r3);
3471 CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3472 beq(call_runtime);
3473
3474 LoadRoot(null_value, Heap::kNullValueRootIndex);
3475 b(&start, Label::kNear);
3476
3477 bind(&next);
3478 LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3479
3480 // For all objects but the receiver, check that the cache is empty.
3481 EnumLength(r5, r3);
3482 CmpSmiLiteral(r5, Smi::FromInt(0), r0);
3483 bne(call_runtime);
3484
3485 bind(&start);
3486
3487 // Check that there are no elements. Register r4 contains the current JS
3488 // object we've reached through the prototype chain.
3489 Label no_elements;
3490 LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset));
3491 CmpP(r4, empty_fixed_array_value);
3492 beq(&no_elements, Label::kNear);
3493
3494 // Second chance, the object may be using the empty slow element dictionary.
3495  CompareRoot(r4, Heap::kEmptySlowElementDictionaryRootIndex);
3496 bne(call_runtime);
3497
3498 bind(&no_elements);
3499 LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset));
3500 CmpP(r4, null_value);
3501 bne(&next);
3502}
3503
3504////////////////////////////////////////////////////////////////////////////////
3505//
3506// New MacroAssembler Interfaces added for S390
3507//
3508////////////////////////////////////////////////////////////////////////////////
3509// Primarily used for loading constants
3510// This should really move to be in macro-assembler as it
3511// is really a pseudo instruction
3512// Some usages of this intend for a FIXED_SEQUENCE to be used
3513// @TODO - break this dependency so we can optimize mov() in general
3514// and only use the generic version when we require a fixed sequence
3515void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
3516 Representation r, Register scratch) {
3517 DCHECK(!r.IsDouble());
3518 if (r.IsInteger8()) {
3519 LoadB(dst, mem);
3520 lgbr(dst, dst);
3521 } else if (r.IsUInteger8()) {
3522 LoadlB(dst, mem);
3523 } else if (r.IsInteger16()) {
3524 LoadHalfWordP(dst, mem, scratch);
3525 lghr(dst, dst);
3526 } else if (r.IsUInteger16()) {
3527 LoadHalfWordP(dst, mem, scratch);
3528#if V8_TARGET_ARCH_S390X
3529 } else if (r.IsInteger32()) {
3530 LoadW(dst, mem, scratch);
3531#endif
3532 } else {
3533 LoadP(dst, mem, scratch);
3534 }
3535}
3536
3537void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
3538 Representation r, Register scratch) {
3539 DCHECK(!r.IsDouble());
3540 if (r.IsInteger8() || r.IsUInteger8()) {
3541 StoreByte(src, mem, scratch);
3542 } else if (r.IsInteger16() || r.IsUInteger16()) {
3543 StoreHalfWord(src, mem, scratch);
3544#if V8_TARGET_ARCH_S390X
3545 } else if (r.IsInteger32()) {
3546 StoreW(src, mem, scratch);
3547#endif
3548 } else {
3549 if (r.IsHeapObject()) {
3550 AssertNotSmi(src);
3551 } else if (r.IsSmi()) {
3552 AssertSmi(src);
3553 }
3554 StoreP(src, mem, scratch);
3555 }
3556}
3557
3558void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
3559 Register scratch_reg,
3560 Register scratch2_reg,
3561 Label* no_memento_found) {
3562 Label map_check;
3563 Label top_check;
3564 ExternalReference new_space_allocation_top =
3565 ExternalReference::new_space_allocation_top_address(isolate());
3566 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
3567 const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
3568
3569 DCHECK(!AreAliased(receiver_reg, scratch_reg));
3570
3571 // Bail out if the object is not in new space.
3572 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
3573
3574 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3575 AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
3576
3577 // If the object is in new space, we need to check whether it is on the same
3578 // page as the current top.
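  // Two addresses lie on the same page exactly when their XOR is zero in the
  // page-number bits (everything above the page offset); the AndP with
  // ~Page::kPageAlignmentMask below isolates exactly those bits.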
3579 XorP(r0, scratch_reg, Operand(new_space_allocation_top));
3580 AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3581 beq(&top_check, Label::kNear);
3582 // The object is on a different page than allocation top. Bail out if the
3583 // object sits on the page boundary as no memento can follow and we cannot
3584 // touch the memory following it.
3585 XorP(r0, scratch_reg, receiver_reg);
3586 AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3587 bne(no_memento_found);
3588 // Continue with the actual map check.
3589 b(&map_check, Label::kNear);
3590 // If top is on the same page as the current object, we need to check whether
3591 // we are below top.
3592 bind(&top_check);
3593 CmpP(scratch_reg, Operand(new_space_allocation_top));
3594 bgt(no_memento_found);
3595 // Memento map check.
3596 bind(&map_check);
3597 LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
3598 CmpP(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
3599}
3600
3601Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
3602 Register reg4, Register reg5,
3603 Register reg6) {
3604 RegList regs = 0;
3605 if (reg1.is_valid()) regs |= reg1.bit();
3606 if (reg2.is_valid()) regs |= reg2.bit();
3607 if (reg3.is_valid()) regs |= reg3.bit();
3608 if (reg4.is_valid()) regs |= reg4.bit();
3609 if (reg5.is_valid()) regs |= reg5.bit();
3610 if (reg6.is_valid()) regs |= reg6.bit();
3611
3612 const RegisterConfiguration* config =
3613 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
3614 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3615 int code = config->GetAllocatableGeneralCode(i);
3616 Register candidate = Register::from_code(code);
3617 if (regs & candidate.bit()) continue;
3618 return candidate;
3619 }
3620 UNREACHABLE();
3621 return no_reg;
3622}
3623
3624void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
3625 Register scratch0,
3626 Register scratch1,
3627 Label* found) {
3628 DCHECK(!scratch1.is(scratch0));
3629 Register current = scratch0;
3630 Label loop_again, end;
3631
3632  // 'current' (scratch0) walks the prototype chain of 'object'.
3633 LoadRR(current, object);
3634 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
3635 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
3636 CompareRoot(current, Heap::kNullValueRootIndex);
3637 beq(&end);
3638
3639 // Loop based on the map going up the prototype chain.
3640 bind(&loop_again);
3641 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
3642
3643 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3644 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3645 LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
3646 CmpP(scratch1, Operand(JS_OBJECT_TYPE));
3647 blt(found);
3648
3649 LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3650 DecodeField<Map::ElementsKindBits>(scratch1);
3651 CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
3652 beq(found);
3653 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
3654 CompareRoot(current, Heap::kNullValueRootIndex);
3655 bne(&loop_again);
3656
3657 bind(&end);
3658}
3659
3660void MacroAssembler::mov(Register dst, const Operand& src) {
3661 if (src.rmode_ != kRelocInfo_NONEPTR) {
3662 // some form of relocation needed
3663 RecordRelocInfo(src.rmode_, src.imm_);
3664 }
3665
3666#if V8_TARGET_ARCH_S390X
3667 int64_t value = src.immediate();
3668 int32_t hi_32 = static_cast<int64_t>(value) >> 32;
3669 int32_t lo_32 = static_cast<int32_t>(value);
3670
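  // Illustrative example (not from the original): for
  // src = Operand(0x123456789ABCDEF0) this emits IIHF dst,0x12345678
  // followed by IILF dst,0x9ABCDEF0.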
3671 iihf(dst, Operand(hi_32));
3672 iilf(dst, Operand(lo_32));
3673#else
3674 int value = src.immediate();
3675 iilf(dst, Operand(value));
3676#endif
3677}
3678
3679void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
3680 if (dst.is(src2)) {
3681 MulP(dst, src1);
3682 } else if (dst.is(src1)) {
3683 MulP(dst, src2);
3684 } else {
3685 Move(dst, src1);
3686 MulP(dst, src2);
3687 }
3688}
3689
3690void MacroAssembler::DivP(Register dividend, Register divider) {
3691 // have to make sure the src and dst are reg pairs
3692 DCHECK(dividend.code() % 2 == 0);
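  // Note (added for clarity): the divide operates on the even/odd register
  // pair starting at 'dividend'; afterwards the remainder is in the even
  // register and the quotient in the odd one.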
3693#if V8_TARGET_ARCH_S390X
3694 dsgr(dividend, divider);
3695#else
3696 dr(dividend, divider);
3697#endif
3698}
3699
3700void MacroAssembler::MulP(Register dst, const Operand& opnd) {
3701#if V8_TARGET_ARCH_S390X
3702 msgfi(dst, opnd);
3703#else
3704 msfi(dst, opnd);
3705#endif
3706}
3707
3708void MacroAssembler::MulP(Register dst, Register src) {
3709#if V8_TARGET_ARCH_S390X
3710 msgr(dst, src);
3711#else
3712 msr(dst, src);
3713#endif
3714}
3715
3716void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
3717#if V8_TARGET_ARCH_S390X
3718  if (is_int20(opnd.offset())) {
3719    msg(dst, opnd);
3720  } else {
3721    UNIMPLEMENTED();
3722  }
3723#else
3724  if (is_uint12(opnd.offset())) {
3725    ms(dst, opnd);
3726  } else if (is_int20(opnd.offset())) {
3727    msy(dst, opnd);
3728  } else {
3729    UNIMPLEMENTED();
3730  }
3731#endif
3732}
3733
3734//----------------------------------------------------------------------------
3735// Add Instructions
3736//----------------------------------------------------------------------------
3737
3738// Add 32-bit (Register dst = Register dst + Immediate opnd)
3739void MacroAssembler::Add32(Register dst, const Operand& opnd) {
3740 if (is_int16(opnd.immediate()))
3741 ahi(dst, opnd);
3742 else
3743 afi(dst, opnd);
3744}
3745
3746// Add Pointer Size (Register dst = Register dst + Immediate opnd)
3747void MacroAssembler::AddP(Register dst, const Operand& opnd) {
3748#if V8_TARGET_ARCH_S390X
3749 if (is_int16(opnd.immediate()))
3750 aghi(dst, opnd);
3751 else
3752 agfi(dst, opnd);
3753#else
3754 Add32(dst, opnd);
3755#endif
3756}
3757
3758// Add 32-bit (Register dst = Register src + Immediate opnd)
3759void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
3760 if (!dst.is(src)) {
3761 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3762 ahik(dst, src, opnd);
3763 return;
3764 }
3765 lr(dst, src);
3766 }
3767 Add32(dst, opnd);
3768}
3769
3770// Add Pointer Size (Register dst = Register src + Immediate opnd)
3771void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
3772 if (!dst.is(src)) {
3773 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3774 AddPImm_RRI(dst, src, opnd);
3775 return;
3776 }
3777 LoadRR(dst, src);
3778 }
3779 AddP(dst, opnd);
3780}
3781
3782// Add 32-bit (Register dst = Register dst + Register src)
3783void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
3784
3785// Add Pointer Size (Register dst = Register dst + Register src)
3786void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
3787
3788// Add Pointer Size with src extension
3789// (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
3790// src is treated as a 32-bit signed integer, which is sign extended to
3791// 64-bit if necessary.
3792void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
3793#if V8_TARGET_ARCH_S390X
3794 agfr(dst, src);
3795#else
3796 ar(dst, src);
3797#endif
3798}
3799
3800// Add 32-bit (Register dst = Register src1 + Register src2)
3801void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
3802 if (!dst.is(src1) && !dst.is(src2)) {
3803 // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
3804 // as AR is a smaller instruction
3805 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3806 ark(dst, src1, src2);
3807 return;
3808 } else {
3809 lr(dst, src1);
3810 }
3811 } else if (dst.is(src2)) {
3812 src2 = src1;
3813 }
3814 ar(dst, src2);
3815}
3816
3817// Add Pointer Size (Register dst = Register src1 + Register src2)
3818void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
3819 if (!dst.is(src1) && !dst.is(src2)) {
3820 // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
3821 // as AR is a smaller instruction
3822 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3823 AddP_RRR(dst, src1, src2);
3824 return;
3825 } else {
3826 LoadRR(dst, src1);
3827 }
3828 } else if (dst.is(src2)) {
3829 src2 = src1;
3830 }
3831 AddRR(dst, src2);
3832}
3833
3834// Add Pointer Size with src extension
3835// (Register dst (ptr) = Register src1 (ptr) +
3836// Register src2 (32 | 32->64))
3837// src is treated as a 32-bit signed integer, which is sign extended to
3838// 64-bit if necessary.
3839void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
3840 Register src2) {
3841#if V8_TARGET_ARCH_S390X
3842 if (dst.is(src2)) {
3843 // The source we need to sign extend is the same as result.
3844 lgfr(dst, src2);
3845 agr(dst, src1);
3846 } else {
3847 if (!dst.is(src1)) LoadRR(dst, src1);
3848 agfr(dst, src2);
3849 }
3850#else
3851 AddP(dst, src1, src2);
3852#endif
3853}
3854
3855// Add 32-bit (Register-Memory)
3856void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
3857 DCHECK(is_int20(opnd.offset()));
3858 if (is_uint12(opnd.offset()))
3859 a(dst, opnd);
3860 else
3861 ay(dst, opnd);
3862}
3863
3864// Add Pointer Size (Register-Memory)
3865void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
3866#if V8_TARGET_ARCH_S390X
3867 DCHECK(is_int20(opnd.offset()));
3868 ag(dst, opnd);
3869#else
3870 Add32(dst, opnd);
3871#endif
3872}
3873
3874// Add Pointer Size with src extension
3875// (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
3876// src is treated as a 32-bit signed integer, which is sign extended to
3877// 64-bit if necessary.
3878void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
3879#if V8_TARGET_ARCH_S390X
3880 DCHECK(is_int20(opnd.offset()));
3881 agf(dst, opnd);
3882#else
3883 Add32(dst, opnd);
3884#endif
3885}
3886
3887// Add 32-bit (Memory - Immediate)
3888void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
3889 DCHECK(is_int8(imm.immediate()));
3890 DCHECK(is_int20(opnd.offset()));
3891 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3892 asi(opnd, imm);
3893}
3894
3895// Add Pointer-sized (Memory - Immediate)
3896void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
3897 DCHECK(is_int8(imm.immediate()));
3898 DCHECK(is_int20(opnd.offset()));
3899 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3900#if V8_TARGET_ARCH_S390X
3901 agsi(opnd, imm);
3902#else
3903 asi(opnd, imm);
3904#endif
3905}
3906
3907//----------------------------------------------------------------------------
3908// Add Logical Instructions
3909//----------------------------------------------------------------------------
3910
3911// Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
3912void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
3913 Register src2) {
3914 if (!dst.is(src2) && !dst.is(src1)) {
3915 lr(dst, src1);
3916 alcr(dst, src2);
3917 } else if (!dst.is(src2)) {
3918 // dst == src1
3919 DCHECK(dst.is(src1));
3920 alcr(dst, src2);
3921 } else {
3922 // dst == src2
3923 DCHECK(dst.is(src2));
3924 alcr(dst, src1);
3925 }
3926}
3927
3928// Add Logical 32-bit (Register dst = Register src1 + Register src2)
3929void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
3930 if (!dst.is(src2) && !dst.is(src1)) {
3931 lr(dst, src1);
3932 alr(dst, src2);
3933 } else if (!dst.is(src2)) {
3934 // dst == src1
3935 DCHECK(dst.is(src1));
3936 alr(dst, src2);
3937 } else {
3938 // dst == src2
3939 DCHECK(dst.is(src2));
3940 alr(dst, src1);
3941 }
3942}
3943
3944// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
3945void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
3946 alfi(dst, imm);
3947}
3948
3949// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
3950void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
3951#ifdef V8_TARGET_ARCH_S390X
3952 algfi(dst, imm);
3953#else
3954 AddLogical(dst, imm);
3955#endif
3956}
3957
3958// Add Logical 32-bit (Register-Memory)
3959void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
3960 DCHECK(is_int20(opnd.offset()));
3961 if (is_uint12(opnd.offset()))
3962 al_z(dst, opnd);
3963 else
3964 aly(dst, opnd);
3965}
3966
3967// Add Logical Pointer Size (Register-Memory)
3968void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
3969#if V8_TARGET_ARCH_S390X
3970 DCHECK(is_int20(opnd.offset()));
3971 alg(dst, opnd);
3972#else
3973 AddLogical(dst, opnd);
3974#endif
3975}
3976
3977//----------------------------------------------------------------------------
3978// Subtract Instructions
3979//----------------------------------------------------------------------------
3980
3981// Subtract Logical With Borrow 32-bit (Register dst = Register src1 - Register
3982// src2)
3983void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
3984 Register src2) {
3985 if (!dst.is(src2) && !dst.is(src1)) {
3986 lr(dst, src1);
3987 slbr(dst, src2);
3988 } else if (!dst.is(src2)) {
3989 // dst == src1
3990 DCHECK(dst.is(src1));
3991 slbr(dst, src2);
3992 } else {
3993 // dst == src2
3994 DCHECK(dst.is(src2));
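    // Subtraction is not commutative, so copy src2 (== dst) to r0 and
    // recurse with the operands in the right order.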
3995 lr(r0, dst);
3996 SubLogicalWithBorrow32(dst, src1, r0);
3997 }
3998}
3999
4000// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
4001void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
4002 if (!dst.is(src2) && !dst.is(src1)) {
4003 lr(dst, src1);
4004 slr(dst, src2);
4005 } else if (!dst.is(src2)) {
4006 // dst == src1
4007 DCHECK(dst.is(src1));
4008 slr(dst, src2);
4009 } else {
4010 // dst == src2
4011 DCHECK(dst.is(src2));
4012 lr(r0, dst);
4013 SubLogical32(dst, src1, r0);
4014 }
4015}
4016
4017// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
4018void MacroAssembler::Sub32(Register dst, const Operand& imm) {
4019 Add32(dst, Operand(-(imm.imm_)));
4020}
4021
4022// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
4023void MacroAssembler::SubP(Register dst, const Operand& imm) {
4024 AddP(dst, Operand(-(imm.imm_)));
4025}
4026
4027// Subtract 32-bit (Register dst = Register src - Immediate opnd)
4028void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
4029 Add32(dst, src, Operand(-(imm.imm_)));
4030}
4031
4032// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
4033void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
4034 AddP(dst, src, Operand(-(imm.imm_)));
4035}
4036
4037// Subtract 32-bit (Register dst = Register dst - Register src)
4038void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
4039
4040// Subtract Pointer Size (Register dst = Register dst - Register src)
4041void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
4042
4043// Subtract Pointer Size with src extension
4044// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
4045// src is treated as a 32-bit signed integer, which is sign extended to
4046// 64-bit if necessary.
4047void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
4048#if V8_TARGET_ARCH_S390X
4049 sgfr(dst, src);
4050#else
4051 sr(dst, src);
4052#endif
4053}
4054
4055// Subtract 32-bit (Register = Register - Register)
4056void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
4057 // Use non-clobbering version if possible
4058 if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
4059 srk(dst, src1, src2);
4060 return;
4061 }
4062 if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
4063 // In scenario where we have dst = src - dst, we need to swap and negate
4064 if (!dst.is(src1) && dst.is(src2)) {
4065 sr(dst, src1); // dst = (dst - src)
4066 lcr(dst, dst); // dst = -dst
4067 } else {
4068 sr(dst, src2);
4069 }
4070}
4071
4072// Subtract Pointer Sized (Register = Register - Register)
4073void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
4074 // Use non-clobbering version if possible
4075 if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
4076 SubP_RRR(dst, src1, src2);
4077 return;
4078 }
4079 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
4080 // In scenario where we have dst = src - dst, we need to swap and negate
4081 if (!dst.is(src1) && dst.is(src2)) {
4082 SubP(dst, src1); // dst = (dst - src)
4083 LoadComplementRR(dst, dst); // dst = -dst
4084 } else {
4085 SubP(dst, src2);
4086 }
4087}
4088
4089// Subtract Pointer Size with src extension
4090// (Register dst (ptr) = Register src1 (ptr) - Register src2 (32 | 32->64))
4091// src2 is treated as a 32-bit signed integer, which is sign extended to
4092// 64-bit if necessary.
4093void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
4094 Register src2) {
4095#if V8_TARGET_ARCH_S390X
4096 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
4097
4098 // In scenario where we have dst = src - dst, we need to swap and negate
4099 if (!dst.is(src1) && dst.is(src2)) {
4100 lgfr(dst, dst); // Sign extend this operand first.
4101 SubP(dst, src1); // dst = (dst - src)
4102 LoadComplementRR(dst, dst); // dst = -dst
4103 } else {
4104 sgfr(dst, src2);
4105 }
4106#else
4107 SubP(dst, src1, src2);
4108#endif
4109}
4110
4111// Subtract 32-bit (Register-Memory)
4112void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
4113 DCHECK(is_int20(opnd.offset()));
4114 if (is_uint12(opnd.offset()))
4115 s(dst, opnd);
4116 else
4117 sy(dst, opnd);
4118}
4119
4120// Subtract Pointer Sized (Register - Memory)
4121void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
4122#if V8_TARGET_ARCH_S390X
4123 sg(dst, opnd);
4124#else
4125 Sub32(dst, opnd);
4126#endif
4127}
4128
4129void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
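  // Note (added): the shift below clobbers 'src'; the 32-bit value ends up in
  // the leftmost word of the FPR, where short floating-point operands live.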
4130 sllg(src, src, Operand(32));
4131 ldgr(dst, src);
4132}
4133
4134void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
4135 lgdr(dst, src);
4136 srlg(dst, dst, Operand(32));
4137}
4138
4139void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
4140#if V8_TARGET_ARCH_S390X
4141 DCHECK(is_int20(opnd.offset()));
4142 sgf(dst, opnd);
4143#else
4144 Sub32(dst, opnd);
4145#endif
4146}
4147
4148//----------------------------------------------------------------------------
4149// Subtract Logical Instructions
4150//----------------------------------------------------------------------------
4151
4152// Subtract Logical 32-bit (Register - Memory)
4153void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
4154 DCHECK(is_int20(opnd.offset()));
4155 if (is_uint12(opnd.offset()))
4156 sl(dst, opnd);
4157 else
4158 sly(dst, opnd);
4159}
4160
4161// Subtract Logical Pointer Sized (Register - Memory)
4162void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
4163 DCHECK(is_int20(opnd.offset()));
4164#if V8_TARGET_ARCH_S390X
4165  slg(dst, opnd);
4166#else
4167 SubLogical(dst, opnd);
4168#endif
4169}
4170
4171// Subtract Logical Pointer Size with src extension
4172// (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
4173// opnd is treated as a 32-bit unsigned integer, which is zero extended to
4174// 64-bit if necessary.
4175void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
4176 const MemOperand& opnd) {
4177#if V8_TARGET_ARCH_S390X
4178 DCHECK(is_int20(opnd.offset()));
4179 slgf(dst, opnd);
4180#else
4181 SubLogical(dst, opnd);
4182#endif
4183}
4184
4185//----------------------------------------------------------------------------
4186// Bitwise Operations
4187//----------------------------------------------------------------------------
4188
4189// AND 32-bit - dst = dst & src
4190void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
4191
4192// AND Pointer Size - dst = dst & src
4193void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
4194
4195// Non-clobbering AND 32-bit - dst = src1 & src2
4196void MacroAssembler::And(Register dst, Register src1, Register src2) {
4197 if (!dst.is(src1) && !dst.is(src2)) {
4198    // We prefer to generate NR/NGR, over the non clobbering NRK/NGRK
4199    // as NR is a smaller instruction
4200 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4201 nrk(dst, src1, src2);
4202 return;
4203 } else {
4204 lr(dst, src1);
4205 }
4206 } else if (dst.is(src2)) {
4207 src2 = src1;
4208 }
4209 And(dst, src2);
4210}
4211
4212// Non-clobbering AND pointer size - dst = src1 & src2
4213void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
4214 if (!dst.is(src1) && !dst.is(src2)) {
4215    // We prefer to generate NR/NGR, over the non clobbering NRK/NGRK
4216    // as NR is a smaller instruction
4217 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4218 AndP_RRR(dst, src1, src2);
4219 return;
4220 } else {
4221 LoadRR(dst, src1);
4222 }
4223 } else if (dst.is(src2)) {
4224 src2 = src1;
4225 }
4226 AndP(dst, src2);
4227}
4228
4229// AND 32-bit (Reg - Mem)
4230void MacroAssembler::And(Register dst, const MemOperand& opnd) {
4231 DCHECK(is_int20(opnd.offset()));
4232 if (is_uint12(opnd.offset()))
4233 n(dst, opnd);
4234 else
4235 ny(dst, opnd);
4236}
4237
4238// AND Pointer Size (Reg - Mem)
4239void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
4240 DCHECK(is_int20(opnd.offset()));
4241#if V8_TARGET_ARCH_S390X
4242 ng(dst, opnd);
4243#else
4244 And(dst, opnd);
4245#endif
4246}
4247
4248// AND 32-bit - dst = dst & imm
4249void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
4250
4251// AND Pointer Size - dst = dst & imm
4252void MacroAssembler::AndP(Register dst, const Operand& opnd) {
4253#if V8_TARGET_ARCH_S390X
4254 intptr_t value = opnd.imm_;
4255 if (value >> 32 != -1) {
4256 // this may not work b/c condition code won't be set correctly
4257 nihf(dst, Operand(value >> 32));
4258 }
4259 nilf(dst, Operand(value & 0xFFFFFFFF));
4260#else
4261 And(dst, opnd);
4262#endif
4263}
4264
4265// AND 32-bit - dst = src & imm
4266void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
4267 if (!dst.is(src)) lr(dst, src);
4268 nilf(dst, opnd);
4269}
4270
4271// AND Pointer Size - dst = src & imm
4272void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
4273 // Try to exploit RISBG first
4274 intptr_t value = opnd.imm_;
4275 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
4276 intptr_t shifted_value = value;
4277 int trailing_zeros = 0;
4278
4279 // We start checking how many trailing zeros are left at the end.
4280 while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
4281 trailing_zeros++;
4282 shifted_value >>= 1;
4283 }
4284
4285 // If temp (value with right-most set of zeros shifted out) is 1 less
4286 // than power of 2, we have consecutive bits of 1.
4287 // Special case: If shift_value is zero, we cannot use RISBG, as it requires
4288 // selection of at least 1 bit.
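    // Illustrative example (added, not in the original): value = 0x00FF0000
    // gives trailing_zeros = 16 and shifted_value = 0xFF; 0xFF + 1 is a power
    // of two, so startBit = 56 - 16 = 40 and endBit = 63 - 16 = 47, and RISBG
    // selects exactly the 0x00FF0000 bits while zeroing the rest.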
4289 if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
4290 int startBit =
4291 base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
4292 int endBit = 63 - trailing_zeros;
4293 // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
4294 risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
4295 true);
4296 return;
4297 } else if (-1 == shifted_value) {
4298 // A Special case in which all top bits up to MSB are 1's. In this case,
4299 // we can set startBit to be 0.
4300 int endBit = 63 - trailing_zeros;
4301 risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
4302 return;
4303 }
4304 }
4305
4306 // If we are &'ing zero, we can just whack the dst register and skip copy
4307 if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
4308 AndP(dst, opnd);
4309}
4310
4311// OR 32-bit - dst = dst | src
4312void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
4313
4314// OR Pointer Size - dst = dst | src
4315void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
4316
4317// Non-clobbering OR 32-bit - dst = src1 | src2
4318void MacroAssembler::Or(Register dst, Register src1, Register src2) {
4319 if (!dst.is(src1) && !dst.is(src2)) {
4320    // We prefer to generate OR/OGR, over the non clobbering ORK/OGRK
4321    // as OR is a smaller instruction
4322 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4323 ork(dst, src1, src2);
4324 return;
4325 } else {
4326 lr(dst, src1);
4327 }
4328 } else if (dst.is(src2)) {
4329 src2 = src1;
4330 }
4331 Or(dst, src2);
4332}
4333
4334// Non-clobbering OR pointer size - dst = src1 | src2
4335void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
4336 if (!dst.is(src1) && !dst.is(src2)) {
4337    // We prefer to generate OR/OGR, over the non clobbering ORK/OGRK
4338    // as OR is a smaller instruction
4339 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4340 OrP_RRR(dst, src1, src2);
4341 return;
4342 } else {
4343 LoadRR(dst, src1);
4344 }
4345 } else if (dst.is(src2)) {
4346 src2 = src1;
4347 }
4348 OrP(dst, src2);
4349}
4350
4351// OR 32-bit (Reg - Mem)
4352void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
4353 DCHECK(is_int20(opnd.offset()));
4354 if (is_uint12(opnd.offset()))
4355 o(dst, opnd);
4356 else
4357 oy(dst, opnd);
4358}
4359
4360// OR Pointer Size (Reg - Mem)
4361void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
4362 DCHECK(is_int20(opnd.offset()));
4363#if V8_TARGET_ARCH_S390X
4364 og(dst, opnd);
4365#else
4366 Or(dst, opnd);
4367#endif
4368}
4369
4370// OR 32-bit - dst = dst | imm
4371void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
4372
4373// OR Pointer Size - dst = dst | imm
4374void MacroAssembler::OrP(Register dst, const Operand& opnd) {
4375#if V8_TARGET_ARCH_S390X
4376 intptr_t value = opnd.imm_;
4377 if (value >> 32 != 0) {
4378 // this may not work b/c condition code won't be set correctly
4379 oihf(dst, Operand(value >> 32));
4380 }
4381 oilf(dst, Operand(value & 0xFFFFFFFF));
4382#else
4383 Or(dst, opnd);
4384#endif
4385}
4386
4387// OR 32-bit - dst = src | imm
4388void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
4389 if (!dst.is(src)) lr(dst, src);
4390 oilf(dst, opnd);
4391}
4392
4393// OR Pointer Size - dst = src | imm
4394void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
4395 if (!dst.is(src)) LoadRR(dst, src);
4396 OrP(dst, opnd);
4397}
4398
4399// XOR 32-bit - dst = dst ^ src
4400void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
4401
4402// XOR Pointer Size - dst = dst ^ src
4403void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
4404
4405// Non-clobbering XOR 32-bit - dst = src1 ^ src2
4406void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
4407 if (!dst.is(src1) && !dst.is(src2)) {
4408    // We prefer to generate XR/XGR, over the non clobbering XRK/XGRK
4409 // as XR is a smaller instruction
4410 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4411 xrk(dst, src1, src2);
4412 return;
4413 } else {
4414 lr(dst, src1);
4415 }
4416 } else if (dst.is(src2)) {
4417 src2 = src1;
4418 }
4419 Xor(dst, src2);
4420}
4421
4422// Non-clobbering XOR pointer size - dst = src1 ^ src2
4423void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
4424 if (!dst.is(src1) && !dst.is(src2)) {
4425    // We prefer to generate XR/XGR, over the non clobbering XRK/XGRK
4426 // as XR is a smaller instruction
4427 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4428 XorP_RRR(dst, src1, src2);
4429 return;
4430 } else {
4431 LoadRR(dst, src1);
4432 }
4433 } else if (dst.is(src2)) {
4434 src2 = src1;
4435 }
4436 XorP(dst, src2);
4437}
4438
4439// XOR 32-bit (Reg - Mem)
4440void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
4441 DCHECK(is_int20(opnd.offset()));
4442 if (is_uint12(opnd.offset()))
4443 x(dst, opnd);
4444 else
4445 xy(dst, opnd);
4446}
4447
4448// XOR Pointer Size (Reg - Mem)
4449void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
4450 DCHECK(is_int20(opnd.offset()));
4451#if V8_TARGET_ARCH_S390X
4452 xg(dst, opnd);
4453#else
4454 Xor(dst, opnd);
4455#endif
4456}
4457
4458// XOR 32-bit - dst = dst ^ imm
4459void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
4460
4461// XOR Pointer Size - dst = dst ^ imm
4462void MacroAssembler::XorP(Register dst, const Operand& opnd) {
4463#if V8_TARGET_ARCH_S390X
4464 intptr_t value = opnd.imm_;
4465 xihf(dst, Operand(value >> 32));
4466 xilf(dst, Operand(value & 0xFFFFFFFF));
4467#else
4468 Xor(dst, opnd);
4469#endif
4470}
4471
4472// XOR 32-bit - dst = src ^ imm
4473void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
4474 if (!dst.is(src)) lr(dst, src);
4475 xilf(dst, opnd);
4476}
4477
4478// XOR Pointer Size - dst = src ^ imm
4479void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
4480 if (!dst.is(src)) LoadRR(dst, src);
4481 XorP(dst, opnd);
4482}
4483
4484void MacroAssembler::NotP(Register dst) {
4485#if V8_TARGET_ARCH_S390X
4486 xihf(dst, Operand(0xFFFFFFFF));
4487 xilf(dst, Operand(0xFFFFFFFF));
4488#else
4489 XorP(dst, Operand(0xFFFFFFFF));
4490#endif
4491}
4492
4493// works the same as mov
4494void MacroAssembler::Load(Register dst, const Operand& opnd) {
4495 intptr_t value = opnd.immediate();
4496 if (is_int16(value)) {
4497#if V8_TARGET_ARCH_S390X
4498 lghi(dst, opnd);
4499#else
4500 lhi(dst, opnd);
4501#endif
4502 } else {
4503#if V8_TARGET_ARCH_S390X
4504 llilf(dst, opnd);
4505#else
4506 iilf(dst, opnd);
4507#endif
4508 }
4509}
4510
4511void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
4512 DCHECK(is_int20(opnd.offset()));
4513#if V8_TARGET_ARCH_S390X
4514 lgf(dst, opnd); // 64<-32
4515#else
4516 if (is_uint12(opnd.offset())) {
4517 l(dst, opnd);
4518 } else {
4519 ly(dst, opnd);
4520 }
4521#endif
4522}
4523
4524//-----------------------------------------------------------------------------
4525// Compare Helpers
4526//-----------------------------------------------------------------------------
4527
4528// Compare 32-bit Register vs Register
4529void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
4530
4531// Compare Pointer Sized Register vs Register
4532void MacroAssembler::CmpP(Register src1, Register src2) {
4533#if V8_TARGET_ARCH_S390X
4534 cgr(src1, src2);
4535#else
4536 Cmp32(src1, src2);
4537#endif
4538}
4539
4540// Compare 32-bit Register vs Immediate
4541// This helper will set up proper relocation entries if required.
4542void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
4543 if (opnd.rmode_ == kRelocInfo_NONEPTR) {
4544 intptr_t value = opnd.immediate();
4545 if (is_int16(value))
4546 chi(dst, opnd);
4547 else
4548 cfi(dst, opnd);
4549 } else {
4550 // Need to generate relocation record here
4551 RecordRelocInfo(opnd.rmode_, opnd.imm_);
4552 cfi(dst, opnd);
4553 }
4554}
4555
4556// Compare Pointer Sized Register vs Immediate
4557// This helper will set up proper relocation entries if required.
4558void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
4559#if V8_TARGET_ARCH_S390X
4560 if (opnd.rmode_ == kRelocInfo_NONEPTR) {
4561 cgfi(dst, opnd);
4562 } else {
4563 mov(r0, opnd); // Need to generate 64-bit relocation
4564 cgr(dst, r0);
4565 }
4566#else
4567 Cmp32(dst, opnd);
4568#endif
4569}
4570
4571// Compare 32-bit Register vs Memory
4572void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
4573 // make sure offset is within 20 bit range
4574 DCHECK(is_int20(opnd.offset()));
4575 if (is_uint12(opnd.offset()))
4576 c(dst, opnd);
4577 else
4578 cy(dst, opnd);
4579}
4580
4581// Compare Pointer Size Register vs Memory
4582void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
4583 // make sure offset is within 20 bit range
4584 DCHECK(is_int20(opnd.offset()));
4585#if V8_TARGET_ARCH_S390X
4586 cg(dst, opnd);
4587#else
4588 Cmp32(dst, opnd);
4589#endif
4590}
4591
4592//-----------------------------------------------------------------------------
4593// Compare Logical Helpers
4594//-----------------------------------------------------------------------------
4595
4596// Compare Logical 32-bit Register vs Register
4597void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
4598
4599// Compare Logical Pointer Sized Register vs Register
4600void MacroAssembler::CmpLogicalP(Register dst, Register src) {
4601#ifdef V8_TARGET_ARCH_S390X
4602 clgr(dst, src);
4603#else
4604 CmpLogical32(dst, src);
4605#endif
4606}
4607
4608// Compare Logical 32-bit Register vs Immediate
4609void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
4610 clfi(dst, opnd);
4611}
4612
4613// Compare Logical Pointer Sized Register vs Immediate
4614void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
4615#if V8_TARGET_ARCH_S390X
4616 DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
4617 clgfi(dst, opnd);
4618#else
4619 CmpLogical32(dst, opnd);
4620#endif
4621}
4622
4623// Compare Logical 32-bit Register vs Memory
4624void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
4625 // make sure offset is within 20 bit range
4626 DCHECK(is_int20(opnd.offset()));
4627 if (is_uint12(opnd.offset()))
4628 cl(dst, opnd);
4629 else
4630 cly(dst, opnd);
4631}
4632
4633// Compare Logical Pointer Sized Register vs Memory
4634void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
4635 // make sure offset is within 20 bit range
4636 DCHECK(is_int20(opnd.offset()));
4637#if V8_TARGET_ARCH_S390X
4638 clg(dst, opnd);
4639#else
4640 CmpLogical32(dst, opnd);
4641#endif
4642}
4643
4644// Compare Logical Byte (Mem - Imm)
4645void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
4646 DCHECK(is_uint8(imm.immediate()));
4647 if (is_uint12(mem.offset()))
4648 cli(mem, imm);
4649 else
4650 cliy(mem, imm);
4651}
4652
4653void MacroAssembler::Branch(Condition c, const Operand& opnd) {
4654 intptr_t value = opnd.immediate();
4655 if (is_int16(value))
4656 brc(c, opnd);
4657 else
4658 brcl(c, opnd);
4659}
4660
4661// Branch On Count. Decrement R1, and branch if R1 != 0.
4662void MacroAssembler::BranchOnCount(Register r1, Label* l) {
4663 int32_t offset = branch_offset(l);
4664 positions_recorder()->WriteRecordedPositions();
4665 if (is_int16(offset)) {
4666#if V8_TARGET_ARCH_S390X
4667 brctg(r1, Operand(offset));
4668#else
4669 brct(r1, Operand(offset));
4670#endif
4671 } else {
4672 AddP(r1, Operand(-1));
4673 Branch(ne, Operand(offset));
4674 }
4675}
4676
4677void MacroAssembler::LoadIntLiteral(Register dst, int value) {
4678 Load(dst, Operand(value));
4679}
4680
4681void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
4682 intptr_t value = reinterpret_cast<intptr_t>(smi);
4683#if V8_TARGET_ARCH_S390X
4684 DCHECK((value & 0xffffffff) == 0);
4685 // The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
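  // e.g. Smi::FromInt(5) is the bit pattern 0x0000000500000000, so LLIHF
  // writes 5 into the upper half and clears the lower half (illustrative,
  // assumes 32-bit Smi values).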
4686 llihf(dst, Operand(value >> 32));
4687#else
4688 llilf(dst, Operand(value));
4689#endif
4690}
4691
4692void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
4693 Register scratch) {
4694 uint32_t hi_32 = value >> 32;
4695 uint32_t lo_32 = static_cast<uint32_t>(value);
4696
4697 // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
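  // e.g. for 1.0 (bit pattern 0x3FF0000000000000) this emits
  // IIHF scratch,0x3FF00000 / IILF scratch,0x0 / LDGR result,scratch
  // (illustrative example, not from the original).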
4698 iihf(scratch, Operand(hi_32));
4699 iilf(scratch, Operand(lo_32));
4700 ldgr(result, scratch);
4701}
4702
4703void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
4704 Register scratch) {
4705 uint64_t int_val = bit_cast<uint64_t, double>(value);
4706 LoadDoubleLiteral(result, int_val, scratch);
4707}
4708
4709void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
4710 Register scratch) {
4711 uint32_t hi_32 = bit_cast<uint32_t>(value);
4712 uint32_t lo_32 = 0;
4713
4714 // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
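  // Short (32-bit) floating-point operands occupy the leftmost word of an
  // FPR, so the bit pattern is placed in the high half of the GPR before the
  // LDGR transfer (note added for clarity).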
4715 iihf(scratch, Operand(hi_32));
4716 iilf(scratch, Operand(lo_32));
4717 ldgr(result, scratch);
4718}
4719
4720void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
4721#if V8_TARGET_ARCH_S390X
4722 LoadSmiLiteral(scratch, smi);
4723 cgr(src1, scratch);
4724#else
4725 // CFI takes 32-bit immediate.
4726 cfi(src1, Operand(smi));
4727#endif
4728}
4729
4730void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
4731 Register scratch) {
4732#if V8_TARGET_ARCH_S390X
4733 LoadSmiLiteral(scratch, smi);
4734 clgr(src1, scratch);
4735#else
4736 // CLFI takes 32-bit immediate
4737 clfi(src1, Operand(smi));
4738#endif
4739}
4740
4741void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4742 Register scratch) {
4743#if V8_TARGET_ARCH_S390X
4744 LoadSmiLiteral(scratch, smi);
4745 AddP(dst, src, scratch);
4746#else
4747 AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
4748#endif
4749}
4750
4751void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4752 Register scratch) {
4753#if V8_TARGET_ARCH_S390X
4754 LoadSmiLiteral(scratch, smi);
4755 SubP(dst, src, scratch);
4756#else
4757 AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
4758#endif
4759}
4760
4761void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
4762 if (!dst.is(src)) LoadRR(dst, src);
4763#if V8_TARGET_ARCH_S390X
4764 DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
4765 int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
4766 nihf(dst, Operand(value));
4767#else
4768 nilf(dst, Operand(reinterpret_cast<int>(smi)));
4769#endif
4770}
4771
4772// Load a "pointer" sized value from the memory location
4773void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4774 Register scratch) {
4775 int offset = mem.offset();
4776
4777 if (!scratch.is(no_reg) && !is_int20(offset)) {
4778 /* cannot use d-form */
4779 LoadIntLiteral(scratch, offset);
4780#if V8_TARGET_ARCH_S390X
4781 lg(dst, MemOperand(mem.rb(), scratch));
4782#else
4783 l(dst, MemOperand(mem.rb(), scratch));
4784#endif
4785 } else {
4786#if V8_TARGET_ARCH_S390X
4787 lg(dst, mem);
4788#else
4789 if (is_uint12(offset)) {
4790 l(dst, mem);
4791 } else {
4792 ly(dst, mem);
4793 }
4794#endif
4795 }
4796}
4797
4798// Store a "pointer" sized value to the memory location
4799void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4800 Register scratch) {
4801 if (!is_int20(mem.offset())) {
4802 DCHECK(!scratch.is(no_reg));
4803 DCHECK(!scratch.is(r0));
4804 LoadIntLiteral(scratch, mem.offset());
4805#if V8_TARGET_ARCH_S390X
4806 stg(src, MemOperand(mem.rb(), scratch));
4807#else
4808 st(src, MemOperand(mem.rb(), scratch));
4809#endif
4810 } else {
4811#if V8_TARGET_ARCH_S390X
4812 stg(src, mem);
4813#else
4814 // StoreW will try to generate ST if offset fits, otherwise
4815 // it'll generate STY.
4816 StoreW(src, mem);
4817#endif
4818 }
4819}
4820
4821// Store a "pointer" sized constant to the memory location
4822void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
4823 Register scratch) {
4824 // Relocations not supported
4825 DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
4826
4827 // Try to use MVGHI/MVHI
4828 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
4829 mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
4830#if V8_TARGET_ARCH_S390X
4831 mvghi(mem, opnd);
4832#else
4833 mvhi(mem, opnd);
4834#endif
4835 } else {
4836 LoadImmP(scratch, opnd);
4837 StoreP(scratch, mem);
4838 }
4839}
4840
4841void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
4842 const MemOperand& mem) {
4843#if V8_TARGET_ARCH_S390X
4844 DCHECK(is_int20(mem.offset()));
4845 lmg(dst1, dst2, mem);
4846#else
4847 if (is_uint12(mem.offset())) {
4848 lm(dst1, dst2, mem);
4849 } else {
4850 DCHECK(is_int20(mem.offset()));
4851 lmy(dst1, dst2, mem);
4852 }
4853#endif
4854}
4855
4856void MacroAssembler::StoreMultipleP(Register src1, Register src2,
4857 const MemOperand& mem) {
4858#if V8_TARGET_ARCH_S390X
4859 DCHECK(is_int20(mem.offset()));
4860 stmg(src1, src2, mem);
4861#else
4862 if (is_uint12(mem.offset())) {
4863 stm(src1, src2, mem);
4864 } else {
4865 DCHECK(is_int20(mem.offset()));
4866 stmy(src1, src2, mem);
4867 }
4868#endif
4869}
4870
4871void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
4872 const MemOperand& mem) {
4873 if (is_uint12(mem.offset())) {
4874 lm(dst1, dst2, mem);
4875 } else {
4876 DCHECK(is_int20(mem.offset()));
4877 lmy(dst1, dst2, mem);
4878 }
4879}
4880
4881void MacroAssembler::StoreMultipleW(Register src1, Register src2,
4882 const MemOperand& mem) {
4883 if (is_uint12(mem.offset())) {
4884 stm(src1, src2, mem);
4885 } else {
4886 DCHECK(is_int20(mem.offset()));
4887 stmy(src1, src2, mem);
4888 }
4889}
4890
4891// Load 32-bits and sign extend if necessary.
4892void MacroAssembler::LoadW(Register dst, Register src) {
4893#if V8_TARGET_ARCH_S390X
4894 lgfr(dst, src);
4895#else
4896 if (!dst.is(src)) lr(dst, src);
4897#endif
4898}
4899
4900// Load 32-bits and sign extend if necessary.
4901void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
4902 Register scratch) {
4903 int offset = mem.offset();
4904
4905 if (!is_int20(offset)) {
4906 DCHECK(!scratch.is(no_reg));
4907 LoadIntLiteral(scratch, offset);
4908#if V8_TARGET_ARCH_S390X
4909 lgf(dst, MemOperand(mem.rb(), scratch));
4910#else
4911 l(dst, MemOperand(mem.rb(), scratch));
4912#endif
4913 } else {
4914#if V8_TARGET_ARCH_S390X
4915 lgf(dst, mem);
4916#else
4917 if (is_uint12(offset)) {
4918 l(dst, mem);
4919 } else {
4920 ly(dst, mem);
4921 }
4922#endif
4923 }
4924}
4925
4926// Load 32-bits and zero extend if necessary.
4927void MacroAssembler::LoadlW(Register dst, Register src) {
4928#if V8_TARGET_ARCH_S390X
4929 llgfr(dst, src);
4930#else
4931 if (!dst.is(src)) lr(dst, src);
4932#endif
4933}
4934
4935// Variable length depending on whether offset fits into immediate field
4936// MemOperand of RX or RXY format
4937void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
4938 Register scratch) {
4939 Register base = mem.rb();
4940 int offset = mem.offset();
4941
4942#if V8_TARGET_ARCH_S390X
4943 if (is_int20(offset)) {
4944 llgf(dst, mem);
4945 } else if (!scratch.is(no_reg)) {
4946 // Materialize offset into scratch register.
4947 LoadIntLiteral(scratch, offset);
4948 llgf(dst, MemOperand(base, scratch));
4949 } else {
4950 DCHECK(false);
4951 }
4952#else
4953 bool use_RXform = false;
4954 bool use_RXYform = false;
4955 if (is_uint12(offset)) {
4956 // RX-format supports unsigned 12-bits offset.
4957 use_RXform = true;
4958 } else if (is_int20(offset)) {
4959 // RXY-format supports signed 20-bits offset.
4960 use_RXYform = true;
4961 } else if (!scratch.is(no_reg)) {
4962 // Materialize offset into scratch register.
4963 LoadIntLiteral(scratch, offset);
4964 } else {
4965 DCHECK(false);
4966 }
4967
4968 if (use_RXform) {
4969 l(dst, mem);
4970 } else if (use_RXYform) {
4971 ly(dst, mem);
4972 } else {
4973 ly(dst, MemOperand(base, scratch));
4974 }
4975#endif
4976}
4977
4978void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
4979#if V8_TARGET_ARCH_S390X
4980 lgb(dst, mem);
4981#else
4982 lb(dst, mem);
4983#endif
4984}
4985
4986void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
4987#if V8_TARGET_ARCH_S390X
4988 llgc(dst, mem);
4989#else
4990 llc(dst, mem);
4991#endif
4992}
4993
4994// Load And Test (Reg <- Reg)
4995void MacroAssembler::LoadAndTest32(Register dst, Register src) {
4996 ltr(dst, src);
4997}
4998
4999// Load And Test
5000// (Register dst(ptr) = Register src (32 | 32->64))
5001// src is treated as a 32-bit signed integer, which is sign extended to
5002// 64-bit if necessary.
5003void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
5004#if V8_TARGET_ARCH_S390X
5005 ltgfr(dst, src);
5006#else
5007 ltr(dst, src);
5008#endif
5009}
5010
5011// Load And Test Pointer Sized (Reg <- Reg)
5012void MacroAssembler::LoadAndTestP(Register dst, Register src) {
5013#if V8_TARGET_ARCH_S390X
5014 ltgr(dst, src);
5015#else
5016 ltr(dst, src);
5017#endif
5018}
5019
5020// Load And Test 32-bit (Reg <- Mem)
5021void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
5022 lt_z(dst, mem);
5023}
5024
5025// Load And Test Pointer Sized (Reg <- Mem)
5026void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
5027#if V8_TARGET_ARCH_S390X
5028 ltg(dst, mem);
5029#else
5030 lt_z(dst, mem);
5031#endif
5032}
5033
5034// Load Double Precision (64-bit) Floating Point number from memory
5035void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
5036 // for 32bit and 64bit we all use 64bit floating point regs
5037 if (is_uint12(mem.offset())) {
5038 ld(dst, mem);
5039 } else {
5040 ldy(dst, mem);
5041 }
5042}
5043
5044// Load Single Precision (32-bit) Floating Point number from memory
5045void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
5046 if (is_uint12(mem.offset())) {
5047 le_z(dst, mem);
5048 } else {
5049 DCHECK(is_int20(mem.offset()));
5050 ley(dst, mem);
5051 }
5052}
5053
5054// Load Single Precision (32-bit) Floating Point number from memory,
5055// and convert to Double Precision (64-bit)
5056void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
5057 const MemOperand& mem) {
5058 LoadFloat32(dst, mem);
5059 ldebr(dst, dst);
5060}
5061
5062// Store Double Precision (64-bit) Floating Point number to memory
5063void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
5064 if (is_uint12(mem.offset())) {
5065 std(dst, mem);
5066 } else {
5067 stdy(dst, mem);
5068 }
5069}
5070
5071// Store Single Precision (32-bit) Floating Point number to memory
5072void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
5073 if (is_uint12(mem.offset())) {
5074 ste(src, mem);
5075 } else {
5076 stey(src, mem);
5077 }
5078}
5079
5080// Convert Double precision (64-bit) to Single Precision (32-bit)
5081// and store resulting Float32 to memory
5082void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
5083 const MemOperand& mem,
5084 DoubleRegister scratch) {
5085 ledbr(scratch, src);
5086 StoreFloat32(scratch, mem);
5087}
5088
5089// Variable length depending on whether offset fits into immediate field
5090// MemOperand of RX or RXY format
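// For instance (illustrative): an offset of 0xFF8 fits the 4-byte RX form
// (ST), an offset of 0x1000 needs the 6-byte RXY form (STY), and anything
// outside the signed 20-bit range is first materialized into 'scratch' and
// addressed as base + index.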
5091void MacroAssembler::StoreW(Register src, const MemOperand& mem,
5092 Register scratch) {
5093 Register base = mem.rb();
5094 int offset = mem.offset();
5095
5096 bool use_RXform = false;
5097 bool use_RXYform = false;
5098
5099 if (is_uint12(offset)) {
5100 // RX-format supports unsigned 12-bits offset.
5101 use_RXform = true;
5102 } else if (is_int20(offset)) {
5103 // RXY-format supports signed 20-bits offset.
5104 use_RXYform = true;
5105 } else if (!scratch.is(no_reg)) {
5106 // Materialize offset into scratch register.
5107 LoadIntLiteral(scratch, offset);
5108 } else {
5109 // scratch is no_reg
5110 DCHECK(false);
5111 }
5112
5113 if (use_RXform) {
5114 st(src, mem);
5115 } else if (use_RXYform) {
5116 sty(src, mem);
5117 } else {
5118 StoreW(src, MemOperand(base, scratch));
5119 }
5120}
5121
5122// Loads 16-bits half-word value from memory and sign extends to pointer
5123// sized register
5124void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
5125 Register scratch) {
5126 Register base = mem.rb();
5127 int offset = mem.offset();
5128
5129 if (!is_int20(offset)) {
5130 DCHECK(!scratch.is(no_reg));
5131 LoadIntLiteral(scratch, offset);
5132#if V8_TARGET_ARCH_S390X
5133 lgh(dst, MemOperand(base, scratch));
5134#else
5135 lh(dst, MemOperand(base, scratch));
5136#endif
5137 } else {
5138#if V8_TARGET_ARCH_S390X
5139 lgh(dst, mem);
5140#else
5141 if (is_uint12(offset)) {
5142 lh(dst, mem);
5143 } else {
5144 lhy(dst, mem);
5145 }
5146#endif
5147 }
5148}
5149
5150// Variable length depending on whether offset fits into immediate field
5151// MemOperand current only supports d-form
5152void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
5153 Register scratch) {
5154 Register base = mem.rb();
5155 int offset = mem.offset();
5156
5157 if (is_uint12(offset)) {
5158 sth(src, mem);
5159 } else if (is_int20(offset)) {
5160 sthy(src, mem);
5161 } else {
5162 DCHECK(!scratch.is(no_reg));
5163 LoadIntLiteral(scratch, offset);
5164 sth(src, MemOperand(base, scratch));
5165 }
5166}
5167
5168// Variable-length emission depending on whether the offset fits into the
5169// immediate field. MemOperand currently only supports the d-form.
5170void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
5171 Register scratch) {
5172 Register base = mem.rb();
5173 int offset = mem.offset();
5174
5175 if (is_uint12(offset)) {
5176 stc(src, mem);
5177 } else if (is_int20(offset)) {
5178 stcy(src, mem);
5179 } else {
5180 DCHECK(!scratch.is(no_reg));
5181 LoadIntLiteral(scratch, offset);
5182 stc(src, MemOperand(base, scratch));
5183 }
5184}
5185
5186// Shift left logical for 32-bit integer types.
5187void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
5188 if (dst.is(src)) {
5189 sll(dst, val);
5190 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5191 sllk(dst, src, val);
5192 } else {
5193 lr(dst, src);
5194 sll(dst, val);
5195 }
5196}
5197
5198// Shift left logical for 32-bit integer types.
5199void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
5200 if (dst.is(src)) {
5201 sll(dst, val);
5202 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5203 sllk(dst, src, val);
5204 } else {
5205 DCHECK(!dst.is(val)); // The lr/sll path clobbers val.
5206 lr(dst, src);
5207 sll(dst, val);
5208 }
5209}
5210
5211// Shift right logical for 32-bit integer types.
5212void MacroAssembler::ShiftRight(Register dst, Register src,
5213 const Operand& val) {
5214 if (dst.is(src)) {
5215 srl(dst, val);
5216 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5217 srlk(dst, src, val);
5218 } else {
5219 lr(dst, src);
5220 srl(dst, val);
5221 }
5222}
5223
5224// Shift right logical for 32-bit integer types.
5225void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
5226 DCHECK(!dst.is(val)); // The lr/srl path clobbers val.
5227 if (dst.is(src)) {
5228 srl(dst, val);
5229 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5230 srlk(dst, src, val);
5231 } else {
5232 lr(dst, src);
5233 srl(dst, val);
5234 }
5235}
5236
5237// Shift left arithmetic for 32-bit integer types.
5238void MacroAssembler::ShiftLeftArith(Register dst, Register src,
5239 const Operand& val) {
5240 if (dst.is(src)) {
5241 sla(dst, val);
5242 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5243 slak(dst, src, val);
5244 } else {
5245 lr(dst, src);
5246 sla(dst, val);
5247 }
5248}
5249
5250// Shift left arithmetic for 32-bit integer types.
5251void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
5252 DCHECK(!dst.is(val)); // The lr/sla path clobbers val.
5253 if (dst.is(src)) {
5254 sla(dst, val);
5255 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5256 slak(dst, src, val);
5257 } else {
5258 lr(dst, src);
5259 sla(dst, val);
5260 }
5261}
5262
5263// Shift right arithmetic for 32-bit integer types.
5264void MacroAssembler::ShiftRightArith(Register dst, Register src,
5265 const Operand& val) {
5266 if (dst.is(src)) {
5267 sra(dst, val);
5268 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5269 srak(dst, src, val);
5270 } else {
5271 lr(dst, src);
5272 sra(dst, val);
5273 }
5274}
5275
5276// Shift right arithmetic for 32-bit integer types.
5277void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
5278 DCHECK(!dst.is(val)); // The lr/sra path clobbers val.
5279 if (dst.is(src)) {
5280 sra(dst, val);
5281 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5282 srak(dst, src, val);
5283 } else {
5284 lr(dst, src);
5285 sra(dst, val);
5286 }
5287}
5288
5289// Clears the rightmost 'val' bits of src, placing the result in dst.
5290void MacroAssembler::ClearRightImm(Register dst, Register src,
5291 const Operand& val) {
5292 int numBitsToClear = val.imm_ % (kPointerSize * 8);
5293
5294 // Try to use RISBG if possible
5295 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
5296 int endBit = 63 - numBitsToClear;
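// RISBG keeps bits 0..endBit of src (no rotate) and, with the final 'zero
// remaining bits' flag set, clears the rightmost numBitsToClear bits.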
5297 risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
5298 return;
5299 }
5300
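// Fall back to AND with an immediate mask: ones in the bits to keep, zeroes
// in the rightmost numBitsToClear bits.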
5301 uint64_t hexMask = ~((static_cast<uint64_t>(1) << numBitsToClear) - 1);
5302
5303 // The AND-immediate instructions below operate on dst in place; copy src first.
5304 if (!dst.is(src)) LoadRR(dst, src);
5305
5306 if (numBitsToClear <= 16) {
5307 nill(dst, Operand(static_cast<uint16_t>(hexMask)));
5308 } else if (numBitsToClear <= 32) {
5309 nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
5310 } else if (numBitsToClear <= 64) {
5311 nilf(dst, Operand(static_cast<intptr_t>(0)));
5312 nihf(dst, Operand(hexMask >> 32));
5313 }
5314}
5315
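// POPCNT sets each byte of dst to the number of one bits in the corresponding
// byte of src; the shift/add sequence below sums those per-byte counts and
// the final load extracts the total from the low byte.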
5316void MacroAssembler::Popcnt32(Register dst, Register src) {
5317 DCHECK(!src.is(r0));
5318 DCHECK(!dst.is(r0));
5319
5320 popcnt(dst, src);
5321 ShiftRight(r0, dst, Operand(16));
5322 ar(dst, r0);
5323 ShiftRight(r0, dst, Operand(8));
5324 ar(dst, r0);
5325 lbr(dst, dst);
5326}
5327
5328#ifdef V8_TARGET_ARCH_S390X
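// 64-bit variant: POPCNT again yields per-byte counts, so an extra shift/add
// step is needed to fold all eight byte counts into the low byte of dst.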
5329void MacroAssembler::Popcnt64(Register dst, Register src) {
5330 DCHECK(!src.is(r0));
5331 DCHECK(!dst.is(r0));
5332
5333 popcnt(dst, src);
5334 ShiftRightP(r0, dst, Operand(32));
5335 AddP(dst, r0);
5336 ShiftRightP(r0, dst, Operand(16));
5337 AddP(dst, r0);
5338 ShiftRightP(r0, dst, Operand(8));
5339 AddP(dst, r0);
5340 lbr(dst, dst);
5341}
5342#endif
5343
5344#ifdef DEBUG
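// Returns true if any of the given (valid) registers alias each other: the
// count of valid registers then differs from the count of distinct ones.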
5345bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
5346 Register reg5, Register reg6, Register reg7, Register reg8,
5347 Register reg9, Register reg10) {
5348 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
5349 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5350 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
5351 reg10.is_valid();
5352
5353 RegList regs = 0;
5354 if (reg1.is_valid()) regs |= reg1.bit();
5355 if (reg2.is_valid()) regs |= reg2.bit();
5356 if (reg3.is_valid()) regs |= reg3.bit();
5357 if (reg4.is_valid()) regs |= reg4.bit();
5358 if (reg5.is_valid()) regs |= reg5.bit();
5359 if (reg6.is_valid()) regs |= reg6.bit();
5360 if (reg7.is_valid()) regs |= reg7.bit();
5361 if (reg8.is_valid()) regs |= reg8.bit();
5362 if (reg9.is_valid()) regs |= reg9.bit();
5363 if (reg10.is_valid()) regs |= reg10.bit();
5364 int n_of_non_aliasing_regs = NumRegs(regs);
5365
5366 return n_of_valid_regs != n_of_non_aliasing_regs;
5367}
5368#endif
5369
5370CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
5371 FlushICache flush_cache)
5372 : address_(address),
5373 size_(size),
5374 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
5375 flush_cache_(flush_cache) {
5376 // Create a new macro assembler pointing to the address of the code to patch.
5377 // The size is adjusted with kGap in order for the assembler to generate
5378 // 'size' bytes of instructions without failing with buffer size constraints.
5379 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5380}
5381
5382CodePatcher::~CodePatcher() {
5383 // Indicate that code has changed.
5384 if (flush_cache_ == FLUSH) {
5385 Assembler::FlushICache(masm_.isolate(), address_, size_);
5386 }
5387
5388 // Check that the code was patched as expected.
5389 DCHECK(masm_.pc_ == address_ + size_);
5390 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5391}
5392
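// Computes result = dividend / divisor truncated toward zero, without a
// divide instruction: multiply by a precomputed 'magic' constant, take the
// high 32 bits of the product, correct for the operand signs, shift by
// mag.shift, and add the dividend's sign bit so negative quotients round
// toward zero.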
5393void MacroAssembler::TruncatingDiv(Register result, Register dividend,
5394 int32_t divisor) {
5395 DCHECK(!dividend.is(result));
5396 DCHECK(!dividend.is(r0));
5397 DCHECK(!result.is(r0));
5398 base::MagicNumbersForDivision<uint32_t> mag =
5399 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5400#ifdef V8_TARGET_ARCH_S390X
5401 LoadRR(result, dividend);
5402 MulP(result, Operand(mag.multiplier));
5403 ShiftRightArithP(result, result, Operand(32));
5404
5405#else
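// Preserve r1: the 32-bit multiply below produces its 64-bit product in the
// even/odd register pair r0:r1.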
5406 lay(sp, MemOperand(sp, -kPointerSize));
5407 StoreP(r1, MemOperand(sp));
5408
5409 mov(r1, Operand(mag.multiplier));
5410 mr_z(r0, dividend); // r0:r1 = r1 * dividend
5411
5412 LoadRR(result, r0);
5413 LoadP(r1, MemOperand(sp));
5414 la(sp, MemOperand(sp, kPointerSize));
5415#endif
5416 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5417 if (divisor > 0 && neg) {
5418 AddP(result, dividend);
5419 }
5420 if (divisor < 0 && !neg && mag.multiplier > 0) {
5421 SubP(result, dividend);
5422 }
5423 if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
5424 ExtractBit(r0, dividend, 31);
5425 AddP(result, r0);
5426}
5427
5428} // namespace internal
5429} // namespace v8
5430
5431#endif // V8_TARGET_ARCH_S390