blob: fb83fe9b76872c1b7ace3fb33bc41896173536b0 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h> // For LONG_MIN, LONG_MAX.
6
Ben Murdochb8a8cc12014-11-26 15:28:44 +00007#if V8_TARGET_ARCH_MIPS64
8
9#include "src/base/division-by-constant.h"
10#include "src/bootstrapper.h"
11#include "src/codegen.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000012#include "src/debug/debug.h"
13#include "src/mips64/macro-assembler-mips64.h"
14#include "src/register-configuration.h"
Emily Bernierd0a1eb72015-03-24 16:35:39 -040015#include "src/runtime/runtime.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000016
17namespace v8 {
18namespace internal {
19
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000020MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
21 CodeObjectRequired create_code_object)
Ben Murdochb8a8cc12014-11-26 15:28:44 +000022 : Assembler(arg_isolate, buffer, size),
23 generating_stub_(false),
Emily Bernierd0a1eb72015-03-24 16:35:39 -040024 has_frame_(false),
25 has_double_zero_reg_set_(false) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000026 if (create_code_object == CodeObjectRequired::kYes) {
27 code_object_ =
28 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +000029 }
30}
31
32
33void MacroAssembler::Load(Register dst,
34 const MemOperand& src,
35 Representation r) {
36 DCHECK(!r.IsDouble());
37 if (r.IsInteger8()) {
38 lb(dst, src);
39 } else if (r.IsUInteger8()) {
40 lbu(dst, src);
41 } else if (r.IsInteger16()) {
42 lh(dst, src);
43 } else if (r.IsUInteger16()) {
44 lhu(dst, src);
45 } else if (r.IsInteger32()) {
46 lw(dst, src);
47 } else {
48 ld(dst, src);
49 }
50}
51
52
53void MacroAssembler::Store(Register src,
54 const MemOperand& dst,
55 Representation r) {
56 DCHECK(!r.IsDouble());
57 if (r.IsInteger8() || r.IsUInteger8()) {
58 sb(src, dst);
59 } else if (r.IsInteger16() || r.IsUInteger16()) {
60 sh(src, dst);
61 } else if (r.IsInteger32()) {
62 sw(src, dst);
63 } else {
64 if (r.IsHeapObject()) {
65 AssertNotSmi(src);
66 } else if (r.IsSmi()) {
67 AssertSmi(src);
68 }
69 sd(src, dst);
70 }
71}
72
73
74void MacroAssembler::LoadRoot(Register destination,
75 Heap::RootListIndex index) {
76 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
77}
78
79
80void MacroAssembler::LoadRoot(Register destination,
81 Heap::RootListIndex index,
82 Condition cond,
83 Register src1, const Operand& src2) {
84 Branch(2, NegateCondition(cond), src1, src2);
85 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
86}
87
88
89void MacroAssembler::StoreRoot(Register source,
90 Heap::RootListIndex index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000091 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +000092 sd(source, MemOperand(s6, index << kPointerSizeLog2));
93}
94
95
96void MacroAssembler::StoreRoot(Register source,
97 Heap::RootListIndex index,
98 Condition cond,
99 Register src1, const Operand& src2) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000100 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000101 Branch(2, NegateCondition(cond), src1, src2);
102 sd(source, MemOperand(s6, index << kPointerSizeLog2));
103}
104
Ben Murdochda12d292016-06-02 14:46:10 +0100105void MacroAssembler::PushCommonFrame(Register marker_reg) {
106 if (marker_reg.is_valid()) {
107 Push(ra, fp, marker_reg);
108 Daddu(fp, sp, Operand(kPointerSize));
109 } else {
110 Push(ra, fp);
111 mov(fp, sp);
112 }
113}
114
115void MacroAssembler::PopCommonFrame(Register marker_reg) {
116 if (marker_reg.is_valid()) {
117 Pop(ra, fp, marker_reg);
118 } else {
119 Pop(ra, fp);
120 }
121}
122
123void MacroAssembler::PushStandardFrame(Register function_reg) {
124 int offset = -StandardFrameConstants::kContextOffset;
125 if (function_reg.is_valid()) {
126 Push(ra, fp, cp, function_reg);
127 offset += kPointerSize;
128 } else {
129 Push(ra, fp, cp);
130 }
131 Daddu(fp, sp, Operand(offset));
132}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000133
134// Push and pop all registers that can hold pointers.
135void MacroAssembler::PushSafepointRegisters() {
136 // Safepoints expect a block of kNumSafepointRegisters values on the
137 // stack, so adjust the stack for unsaved registers.
138 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
139 DCHECK(num_unsaved >= 0);
140 if (num_unsaved > 0) {
141 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
142 }
143 MultiPush(kSafepointSavedRegisters);
144}
145
146
147void MacroAssembler::PopSafepointRegisters() {
148 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
149 MultiPop(kSafepointSavedRegisters);
150 if (num_unsaved > 0) {
151 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
152 }
153}
154
155
156void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
157 sd(src, SafepointRegisterSlot(dst));
158}
159
160
161void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
162 ld(dst, SafepointRegisterSlot(src));
163}
164
165
166int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
167 // The registers are pushed starting with the highest encoding,
168 // which means that lowest encodings are closest to the stack pointer.
169 return kSafepointRegisterStackIndexMap[reg_code];
170}
171
172
173MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
174 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
175}
176
177
178MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
179 UNIMPLEMENTED_MIPS();
180 // General purpose registers are pushed last on the stack.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000181 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000182 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
183 return MemOperand(sp, doubles_size + register_offset);
184}
185
186
187void MacroAssembler::InNewSpace(Register object,
188 Register scratch,
189 Condition cc,
190 Label* branch) {
191 DCHECK(cc == eq || cc == ne);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100192 const int mask =
193 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
194 CheckPageFlag(object, scratch, mask, cc, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000195}
196
197
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000198// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
199// The register 'object' contains a heap object pointer. The heap object
200// tag is shifted away.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000201void MacroAssembler::RecordWriteField(
202 Register object,
203 int offset,
204 Register value,
205 Register dst,
206 RAStatus ra_status,
207 SaveFPRegsMode save_fp,
208 RememberedSetAction remembered_set_action,
209 SmiCheck smi_check,
210 PointersToHereCheck pointers_to_here_check_for_value) {
211 DCHECK(!AreAliased(value, dst, t8, object));
212 // First, check if a write barrier is even needed. The tests below
213 // catch stores of Smis.
214 Label done;
215
216 // Skip barrier if writing a smi.
217 if (smi_check == INLINE_SMI_CHECK) {
218 JumpIfSmi(value, &done);
219 }
220
221 // Although the object register is tagged, the offset is relative to the start
222 // of the object, so so offset must be a multiple of kPointerSize.
223 DCHECK(IsAligned(offset, kPointerSize));
224
225 Daddu(dst, object, Operand(offset - kHeapObjectTag));
226 if (emit_debug_code()) {
227 Label ok;
228 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
229 Branch(&ok, eq, t8, Operand(zero_reg));
230 stop("Unaligned cell in write barrier");
231 bind(&ok);
232 }
233
234 RecordWrite(object,
235 dst,
236 value,
237 ra_status,
238 save_fp,
239 remembered_set_action,
240 OMIT_SMI_CHECK,
241 pointers_to_here_check_for_value);
242
243 bind(&done);
244
245 // Clobber clobbered input registers when running with the debug-code flag
246 // turned on to provoke errors.
247 if (emit_debug_code()) {
248 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
249 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
250 }
251}
252
253
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000254// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000255void MacroAssembler::RecordWriteForMap(Register object,
256 Register map,
257 Register dst,
258 RAStatus ra_status,
259 SaveFPRegsMode fp_mode) {
260 if (emit_debug_code()) {
261 DCHECK(!dst.is(at));
262 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
263 Check(eq,
264 kWrongAddressOrValuePassedToRecordWrite,
265 dst,
266 Operand(isolate()->factory()->meta_map()));
267 }
268
269 if (!FLAG_incremental_marking) {
270 return;
271 }
272
273 if (emit_debug_code()) {
274 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
275 Check(eq,
276 kWrongAddressOrValuePassedToRecordWrite,
277 map,
278 Operand(at));
279 }
280
281 Label done;
282
283 // A single check of the map's pages interesting flag suffices, since it is
284 // only set during incremental collection, and then it's also guaranteed that
285 // the from object's page's interesting flag is also set. This optimization
286 // relies on the fact that maps can never be in new space.
287 CheckPageFlag(map,
288 map, // Used as scratch.
289 MemoryChunk::kPointersToHereAreInterestingMask,
290 eq,
291 &done);
292
293 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
294 if (emit_debug_code()) {
295 Label ok;
296 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
297 Branch(&ok, eq, at, Operand(zero_reg));
298 stop("Unaligned cell in write barrier");
299 bind(&ok);
300 }
301
302 // Record the actual write.
303 if (ra_status == kRAHasNotBeenSaved) {
304 push(ra);
305 }
306 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
307 fp_mode);
308 CallStub(&stub);
309 if (ra_status == kRAHasNotBeenSaved) {
310 pop(ra);
311 }
312
313 bind(&done);
314
315 // Count number of write barriers in generated code.
316 isolate()->counters()->write_barriers_static()->Increment();
317 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
318
319 // Clobber clobbered registers when running with the debug-code flag
320 // turned on to provoke errors.
321 if (emit_debug_code()) {
322 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
323 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
324 }
325}
326
327
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000328// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
329// The register 'object' contains a heap object pointer. The heap object
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000330// tag is shifted away.
331void MacroAssembler::RecordWrite(
332 Register object,
333 Register address,
334 Register value,
335 RAStatus ra_status,
336 SaveFPRegsMode fp_mode,
337 RememberedSetAction remembered_set_action,
338 SmiCheck smi_check,
339 PointersToHereCheck pointers_to_here_check_for_value) {
340 DCHECK(!AreAliased(object, address, value, t8));
341 DCHECK(!AreAliased(object, address, value, t9));
342
343 if (emit_debug_code()) {
344 ld(at, MemOperand(address));
345 Assert(
346 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
347 }
348
349 if (remembered_set_action == OMIT_REMEMBERED_SET &&
350 !FLAG_incremental_marking) {
351 return;
352 }
353
354 // First, check if a write barrier is even needed. The tests below
355 // catch stores of smis and stores into the young generation.
356 Label done;
357
358 if (smi_check == INLINE_SMI_CHECK) {
359 DCHECK_EQ(0, kSmiTag);
360 JumpIfSmi(value, &done);
361 }
362
363 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
364 CheckPageFlag(value,
365 value, // Used as scratch.
366 MemoryChunk::kPointersToHereAreInterestingMask,
367 eq,
368 &done);
369 }
370 CheckPageFlag(object,
371 value, // Used as scratch.
372 MemoryChunk::kPointersFromHereAreInterestingMask,
373 eq,
374 &done);
375
376 // Record the actual write.
377 if (ra_status == kRAHasNotBeenSaved) {
378 push(ra);
379 }
380 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
381 fp_mode);
382 CallStub(&stub);
383 if (ra_status == kRAHasNotBeenSaved) {
384 pop(ra);
385 }
386
387 bind(&done);
388
389 // Count number of write barriers in generated code.
390 isolate()->counters()->write_barriers_static()->Increment();
391 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
392 value);
393
394 // Clobber clobbered registers when running with the debug-code flag
395 // turned on to provoke errors.
396 if (emit_debug_code()) {
397 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
398 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
399 }
400}
401
Ben Murdoch097c5b22016-05-18 11:27:45 +0100402void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
403 Register code_entry,
404 Register scratch) {
405 const int offset = JSFunction::kCodeEntryOffset;
406
407 // Since a code entry (value) is always in old space, we don't need to update
408 // remembered set. If incremental marking is off, there is nothing for us to
409 // do.
410 if (!FLAG_incremental_marking) return;
411
412 DCHECK(js_function.is(a1));
413 DCHECK(code_entry.is(a4));
414 DCHECK(scratch.is(a5));
415 AssertNotSmi(js_function);
416
417 if (emit_debug_code()) {
418 Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
419 ld(at, MemOperand(scratch));
420 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
421 Operand(code_entry));
422 }
423
424 // First, check if a write barrier is even needed. The tests below
425 // catch stores of Smis and stores into young gen.
426 Label done;
427
428 CheckPageFlag(code_entry, scratch,
429 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
430 CheckPageFlag(js_function, scratch,
431 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
432
433 const Register dst = scratch;
434 Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
435
436 // Save caller-saved registers. js_function and code_entry are in the
437 // caller-saved register list.
438 DCHECK(kJSCallerSaved & js_function.bit());
439 DCHECK(kJSCallerSaved & code_entry.bit());
440 MultiPush(kJSCallerSaved | ra.bit());
441
442 int argument_count = 3;
443
444 PrepareCallCFunction(argument_count, code_entry);
445
446 Move(a0, js_function);
447 Move(a1, dst);
448 li(a2, Operand(ExternalReference::isolate_address(isolate())));
449
450 {
451 AllowExternalCallThatCantCauseGC scope(this);
452 CallCFunction(
453 ExternalReference::incremental_marking_record_write_code_entry_function(
454 isolate()),
455 argument_count);
456 }
457
458 // Restore caller-saved registers.
459 MultiPop(kJSCallerSaved | ra.bit());
460
461 bind(&done);
462}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000463
464void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
465 Register address,
466 Register scratch,
467 SaveFPRegsMode fp_mode,
468 RememberedSetFinalAction and_then) {
469 Label done;
470 if (emit_debug_code()) {
471 Label ok;
472 JumpIfNotInNewSpace(object, scratch, &ok);
473 stop("Remembered set pointer is in new space");
474 bind(&ok);
475 }
476 // Load store buffer top.
477 ExternalReference store_buffer =
478 ExternalReference::store_buffer_top(isolate());
479 li(t8, Operand(store_buffer));
480 ld(scratch, MemOperand(t8));
481 // Store pointer to buffer and increment buffer top.
482 sd(address, MemOperand(scratch));
483 Daddu(scratch, scratch, kPointerSize);
484 // Write back new top of buffer.
485 sd(scratch, MemOperand(t8));
486 // Call stub on end of buffer.
487 // Check for end of buffer.
Ben Murdochda12d292016-06-02 14:46:10 +0100488 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000489 DCHECK(!scratch.is(t8));
490 if (and_then == kFallThroughAtEnd) {
Ben Murdochda12d292016-06-02 14:46:10 +0100491 Branch(&done, ne, t8, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000492 } else {
493 DCHECK(and_then == kReturnAtEnd);
Ben Murdochda12d292016-06-02 14:46:10 +0100494 Ret(ne, t8, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000495 }
496 push(ra);
497 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
498 CallStub(&store_buffer_overflow);
499 pop(ra);
500 bind(&done);
501 if (and_then == kReturnAtEnd) {
502 Ret();
503 }
504}
505
506
507// -----------------------------------------------------------------------------
508// Allocation support.
509
510
511void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
512 Register scratch,
513 Label* miss) {
514 Label same_contexts;
Ben Murdochda12d292016-06-02 14:46:10 +0100515 Register temporary = t8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000516
517 DCHECK(!holder_reg.is(scratch));
518 DCHECK(!holder_reg.is(at));
519 DCHECK(!scratch.is(at));
520
Ben Murdochda12d292016-06-02 14:46:10 +0100521 // Load current lexical context from the active StandardFrame, which
522 // may require crawling past STUB frames.
523 Label load_context;
524 Label has_context;
525 mov(at, fp);
526 bind(&load_context);
527 ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
528 // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
529 JumpIfNotSmi(scratch, &has_context, temporary);
530 ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
531 Branch(&load_context);
532 bind(&has_context);
533
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000534 // In debug mode, make sure the lexical context is set.
535#ifdef DEBUG
536 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
537 scratch, Operand(zero_reg));
538#endif
539
540 // Load the native context of the current context.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000541 ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000542
543 // Check the context is a native context.
544 if (emit_debug_code()) {
545 push(holder_reg); // Temporarily save holder on the stack.
546 // Read the first word and compare to the native_context_map.
547 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
548 LoadRoot(at, Heap::kNativeContextMapRootIndex);
549 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
550 holder_reg, Operand(at));
551 pop(holder_reg); // Restore holder.
552 }
553
554 // Check if both contexts are the same.
555 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
556 Branch(&same_contexts, eq, scratch, Operand(at));
557
558 // Check the context is a native context.
559 if (emit_debug_code()) {
560 push(holder_reg); // Temporarily save holder on the stack.
561 mov(holder_reg, at); // Move at to its holding place.
562 LoadRoot(at, Heap::kNullValueRootIndex);
563 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
564 holder_reg, Operand(at));
565
566 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
567 LoadRoot(at, Heap::kNativeContextMapRootIndex);
568 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
569 holder_reg, Operand(at));
570 // Restore at is not needed. at is reloaded below.
571 pop(holder_reg); // Restore holder.
572 // Restore at to holder's context.
573 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
574 }
575
576 // Check that the security token in the calling global object is
577 // compatible with the security token in the receiving global
578 // object.
579 int token_offset = Context::kHeaderSize +
580 Context::SECURITY_TOKEN_INDEX * kPointerSize;
581
582 ld(scratch, FieldMemOperand(scratch, token_offset));
583 ld(at, FieldMemOperand(at, token_offset));
584 Branch(miss, ne, scratch, Operand(at));
585
586 bind(&same_contexts);
587}
588
589
590// Compute the hash code from the untagged key. This must be kept in sync with
591// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
592// code-stub-hydrogen.cc
593void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
594 // First of all we assign the hash seed to scratch.
595 LoadRoot(scratch, Heap::kHashSeedRootIndex);
596 SmiUntag(scratch);
597
598 // Xor original key with a seed.
599 xor_(reg0, reg0, scratch);
600
601 // Compute the hash code from the untagged key. This must be kept in sync
602 // with ComputeIntegerHash in utils.h.
603 //
604 // hash = ~hash + (hash << 15);
605 // The algorithm uses 32-bit integer values.
606 nor(scratch, reg0, zero_reg);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100607 Lsa(reg0, scratch, reg0, 15);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000608
609 // hash = hash ^ (hash >> 12);
610 srl(at, reg0, 12);
611 xor_(reg0, reg0, at);
612
613 // hash = hash + (hash << 2);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100614 Lsa(reg0, reg0, reg0, 2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000615
616 // hash = hash ^ (hash >> 4);
617 srl(at, reg0, 4);
618 xor_(reg0, reg0, at);
619
620 // hash = hash * 2057;
621 sll(scratch, reg0, 11);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100622 Lsa(reg0, reg0, reg0, 3);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000623 addu(reg0, reg0, scratch);
624
625 // hash = hash ^ (hash >> 16);
626 srl(at, reg0, 16);
627 xor_(reg0, reg0, at);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000628 And(reg0, reg0, Operand(0x3fffffff));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000629}
630
631
632void MacroAssembler::LoadFromNumberDictionary(Label* miss,
633 Register elements,
634 Register key,
635 Register result,
636 Register reg0,
637 Register reg1,
638 Register reg2) {
639 // Register use:
640 //
641 // elements - holds the slow-case elements of the receiver on entry.
642 // Unchanged unless 'result' is the same register.
643 //
644 // key - holds the smi key on entry.
645 // Unchanged unless 'result' is the same register.
646 //
647 //
648 // result - holds the result on exit if the load succeeded.
649 // Allowed to be the same as 'key' or 'result'.
650 // Unchanged on bailout so 'key' or 'result' can be used
651 // in further computation.
652 //
653 // Scratch registers:
654 //
655 // reg0 - holds the untagged key on entry and holds the hash once computed.
656 //
657 // reg1 - Used to hold the capacity mask of the dictionary.
658 //
659 // reg2 - Used for the index into the dictionary.
660 // at - Temporary (avoid MacroAssembler instructions also using 'at').
661 Label done;
662
663 GetNumberHash(reg0, reg1);
664
665 // Compute the capacity mask.
666 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
667 SmiUntag(reg1, reg1);
668 Dsubu(reg1, reg1, Operand(1));
669
670 // Generate an unrolled loop that performs a few probes before giving up.
671 for (int i = 0; i < kNumberDictionaryProbes; i++) {
672 // Use reg2 for index calculations and keep the hash intact in reg0.
673 mov(reg2, reg0);
674 // Compute the masked index: (hash + i + i * i) & mask.
675 if (i > 0) {
676 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
677 }
678 and_(reg2, reg2, reg1);
679
680 // Scale the index by multiplying by the element size.
681 DCHECK(SeededNumberDictionary::kEntrySize == 3);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100682 Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000683
684 // Check if the key is identical to the name.
Ben Murdoch097c5b22016-05-18 11:27:45 +0100685 Dlsa(reg2, elements, reg2, kPointerSizeLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000686
687 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
688 if (i != kNumberDictionaryProbes - 1) {
689 Branch(&done, eq, key, Operand(at));
690 } else {
691 Branch(miss, ne, key, Operand(at));
692 }
693 }
694
695 bind(&done);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400696 // Check that the value is a field property.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000697 // reg2: elements + (index * kPointerSize).
698 const int kDetailsOffset =
699 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
700 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000701 DCHECK_EQ(DATA, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000702 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
703 Branch(miss, ne, at, Operand(zero_reg));
704
705 // Get the value at the masked, scaled index and return.
706 const int kValueOffset =
707 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
708 ld(result, FieldMemOperand(reg2, kValueOffset));
709}
710
711
712// ---------------------------------------------------------------------------
713// Instruction macros.
714
715void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
716 if (rt.is_reg()) {
717 addu(rd, rs, rt.rm());
718 } else {
719 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000720 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000721 } else {
722 // li handles the relocation.
723 DCHECK(!rs.is(at));
724 li(at, rt);
725 addu(rd, rs, at);
726 }
727 }
728}
729
730
731void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
732 if (rt.is_reg()) {
733 daddu(rd, rs, rt.rm());
734 } else {
735 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000736 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000737 } else {
738 // li handles the relocation.
739 DCHECK(!rs.is(at));
740 li(at, rt);
741 daddu(rd, rs, at);
742 }
743 }
744}
745
746
747void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
748 if (rt.is_reg()) {
749 subu(rd, rs, rt.rm());
750 } else {
751 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000752 addiu(rd, rs, static_cast<int32_t>(
753 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000754 } else {
755 // li handles the relocation.
756 DCHECK(!rs.is(at));
757 li(at, rt);
758 subu(rd, rs, at);
759 }
760 }
761}
762
763
764void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
765 if (rt.is_reg()) {
766 dsubu(rd, rs, rt.rm());
767 } else {
768 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000769 daddiu(rd, rs,
770 static_cast<int32_t>(
771 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000772 } else {
773 // li handles the relocation.
774 DCHECK(!rs.is(at));
775 li(at, rt);
776 dsubu(rd, rs, at);
777 }
778 }
779}
780
781
782void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
783 if (rt.is_reg()) {
784 mul(rd, rs, rt.rm());
785 } else {
786 // li handles the relocation.
787 DCHECK(!rs.is(at));
788 li(at, rt);
789 mul(rd, rs, at);
790 }
791}
792
793
794void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
795 if (rt.is_reg()) {
796 if (kArchVariant != kMips64r6) {
797 mult(rs, rt.rm());
798 mfhi(rd);
799 } else {
800 muh(rd, rs, rt.rm());
801 }
802 } else {
803 // li handles the relocation.
804 DCHECK(!rs.is(at));
805 li(at, rt);
806 if (kArchVariant != kMips64r6) {
807 mult(rs, at);
808 mfhi(rd);
809 } else {
810 muh(rd, rs, at);
811 }
812 }
813}
814
815
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400816void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
817 if (rt.is_reg()) {
818 if (kArchVariant != kMips64r6) {
819 multu(rs, rt.rm());
820 mfhi(rd);
821 } else {
822 muhu(rd, rs, rt.rm());
823 }
824 } else {
825 // li handles the relocation.
826 DCHECK(!rs.is(at));
827 li(at, rt);
828 if (kArchVariant != kMips64r6) {
829 multu(rs, at);
830 mfhi(rd);
831 } else {
832 muhu(rd, rs, at);
833 }
834 }
835}
836
837
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000838void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
839 if (rt.is_reg()) {
840 if (kArchVariant == kMips64r6) {
841 dmul(rd, rs, rt.rm());
842 } else {
843 dmult(rs, rt.rm());
844 mflo(rd);
845 }
846 } else {
847 // li handles the relocation.
848 DCHECK(!rs.is(at));
849 li(at, rt);
850 if (kArchVariant == kMips64r6) {
851 dmul(rd, rs, at);
852 } else {
853 dmult(rs, at);
854 mflo(rd);
855 }
856 }
857}
858
859
860void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
861 if (rt.is_reg()) {
862 if (kArchVariant == kMips64r6) {
863 dmuh(rd, rs, rt.rm());
864 } else {
865 dmult(rs, rt.rm());
866 mfhi(rd);
867 }
868 } else {
869 // li handles the relocation.
870 DCHECK(!rs.is(at));
871 li(at, rt);
872 if (kArchVariant == kMips64r6) {
873 dmuh(rd, rs, at);
874 } else {
875 dmult(rs, at);
876 mfhi(rd);
877 }
878 }
879}
880
881
882void MacroAssembler::Mult(Register rs, const Operand& rt) {
883 if (rt.is_reg()) {
884 mult(rs, rt.rm());
885 } else {
886 // li handles the relocation.
887 DCHECK(!rs.is(at));
888 li(at, rt);
889 mult(rs, at);
890 }
891}
892
893
894void MacroAssembler::Dmult(Register rs, const Operand& rt) {
895 if (rt.is_reg()) {
896 dmult(rs, rt.rm());
897 } else {
898 // li handles the relocation.
899 DCHECK(!rs.is(at));
900 li(at, rt);
901 dmult(rs, at);
902 }
903}
904
905
906void MacroAssembler::Multu(Register rs, const Operand& rt) {
907 if (rt.is_reg()) {
908 multu(rs, rt.rm());
909 } else {
910 // li handles the relocation.
911 DCHECK(!rs.is(at));
912 li(at, rt);
913 multu(rs, at);
914 }
915}
916
917
918void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
919 if (rt.is_reg()) {
920 dmultu(rs, rt.rm());
921 } else {
922 // li handles the relocation.
923 DCHECK(!rs.is(at));
924 li(at, rt);
925 dmultu(rs, at);
926 }
927}
928
929
930void MacroAssembler::Div(Register rs, const Operand& rt) {
931 if (rt.is_reg()) {
932 div(rs, rt.rm());
933 } else {
934 // li handles the relocation.
935 DCHECK(!rs.is(at));
936 li(at, rt);
937 div(rs, at);
938 }
939}
940
941
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400942void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
943 if (rt.is_reg()) {
944 if (kArchVariant != kMips64r6) {
945 div(rs, rt.rm());
946 mflo(res);
947 } else {
948 div(res, rs, rt.rm());
949 }
950 } else {
951 // li handles the relocation.
952 DCHECK(!rs.is(at));
953 li(at, rt);
954 if (kArchVariant != kMips64r6) {
955 div(rs, at);
956 mflo(res);
957 } else {
958 div(res, rs, at);
959 }
960 }
961}
962
963
964void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
965 if (rt.is_reg()) {
966 if (kArchVariant != kMips64r6) {
967 div(rs, rt.rm());
968 mfhi(rd);
969 } else {
970 mod(rd, rs, rt.rm());
971 }
972 } else {
973 // li handles the relocation.
974 DCHECK(!rs.is(at));
975 li(at, rt);
976 if (kArchVariant != kMips64r6) {
977 div(rs, at);
978 mfhi(rd);
979 } else {
980 mod(rd, rs, at);
981 }
982 }
983}
984
985
986void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
987 if (rt.is_reg()) {
988 if (kArchVariant != kMips64r6) {
989 divu(rs, rt.rm());
990 mfhi(rd);
991 } else {
992 modu(rd, rs, rt.rm());
993 }
994 } else {
995 // li handles the relocation.
996 DCHECK(!rs.is(at));
997 li(at, rt);
998 if (kArchVariant != kMips64r6) {
999 divu(rs, at);
1000 mfhi(rd);
1001 } else {
1002 modu(rd, rs, at);
1003 }
1004 }
1005}
1006
1007
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001008void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
1009 if (rt.is_reg()) {
1010 ddiv(rs, rt.rm());
1011 } else {
1012 // li handles the relocation.
1013 DCHECK(!rs.is(at));
1014 li(at, rt);
1015 ddiv(rs, at);
1016 }
1017}
1018
1019
1020void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
1021 if (kArchVariant != kMips64r6) {
1022 if (rt.is_reg()) {
1023 ddiv(rs, rt.rm());
1024 mflo(rd);
1025 } else {
1026 // li handles the relocation.
1027 DCHECK(!rs.is(at));
1028 li(at, rt);
1029 ddiv(rs, at);
1030 mflo(rd);
1031 }
1032 } else {
1033 if (rt.is_reg()) {
1034 ddiv(rd, rs, rt.rm());
1035 } else {
1036 // li handles the relocation.
1037 DCHECK(!rs.is(at));
1038 li(at, rt);
1039 ddiv(rd, rs, at);
1040 }
1041 }
1042}
1043
1044
1045void MacroAssembler::Divu(Register rs, const Operand& rt) {
1046 if (rt.is_reg()) {
1047 divu(rs, rt.rm());
1048 } else {
1049 // li handles the relocation.
1050 DCHECK(!rs.is(at));
1051 li(at, rt);
1052 divu(rs, at);
1053 }
1054}
1055
1056
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001057void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
1058 if (rt.is_reg()) {
1059 if (kArchVariant != kMips64r6) {
1060 divu(rs, rt.rm());
1061 mflo(res);
1062 } else {
1063 divu(res, rs, rt.rm());
1064 }
1065 } else {
1066 // li handles the relocation.
1067 DCHECK(!rs.is(at));
1068 li(at, rt);
1069 if (kArchVariant != kMips64r6) {
1070 divu(rs, at);
1071 mflo(res);
1072 } else {
1073 divu(res, rs, at);
1074 }
1075 }
1076}
1077
1078
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001079void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
1080 if (rt.is_reg()) {
1081 ddivu(rs, rt.rm());
1082 } else {
1083 // li handles the relocation.
1084 DCHECK(!rs.is(at));
1085 li(at, rt);
1086 ddivu(rs, at);
1087 }
1088}
1089
1090
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001091void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
1092 if (rt.is_reg()) {
1093 if (kArchVariant != kMips64r6) {
1094 ddivu(rs, rt.rm());
1095 mflo(res);
1096 } else {
1097 ddivu(res, rs, rt.rm());
1098 }
1099 } else {
1100 // li handles the relocation.
1101 DCHECK(!rs.is(at));
1102 li(at, rt);
1103 if (kArchVariant != kMips64r6) {
1104 ddivu(rs, at);
1105 mflo(res);
1106 } else {
1107 ddivu(res, rs, at);
1108 }
1109 }
1110}
1111
1112
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001113void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
1114 if (kArchVariant != kMips64r6) {
1115 if (rt.is_reg()) {
1116 ddiv(rs, rt.rm());
1117 mfhi(rd);
1118 } else {
1119 // li handles the relocation.
1120 DCHECK(!rs.is(at));
1121 li(at, rt);
1122 ddiv(rs, at);
1123 mfhi(rd);
1124 }
1125 } else {
1126 if (rt.is_reg()) {
1127 dmod(rd, rs, rt.rm());
1128 } else {
1129 // li handles the relocation.
1130 DCHECK(!rs.is(at));
1131 li(at, rt);
1132 dmod(rd, rs, at);
1133 }
1134 }
1135}
1136
1137
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001138void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
1139 if (kArchVariant != kMips64r6) {
1140 if (rt.is_reg()) {
1141 ddivu(rs, rt.rm());
1142 mfhi(rd);
1143 } else {
1144 // li handles the relocation.
1145 DCHECK(!rs.is(at));
1146 li(at, rt);
1147 ddivu(rs, at);
1148 mfhi(rd);
1149 }
1150 } else {
1151 if (rt.is_reg()) {
1152 dmodu(rd, rs, rt.rm());
1153 } else {
1154 // li handles the relocation.
1155 DCHECK(!rs.is(at));
1156 li(at, rt);
1157 dmodu(rd, rs, at);
1158 }
1159 }
1160}
1161
1162
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001163void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1164 if (rt.is_reg()) {
1165 and_(rd, rs, rt.rm());
1166 } else {
1167 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001168 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001169 } else {
1170 // li handles the relocation.
1171 DCHECK(!rs.is(at));
1172 li(at, rt);
1173 and_(rd, rs, at);
1174 }
1175 }
1176}
1177
1178
1179void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1180 if (rt.is_reg()) {
1181 or_(rd, rs, rt.rm());
1182 } else {
1183 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001184 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001185 } else {
1186 // li handles the relocation.
1187 DCHECK(!rs.is(at));
1188 li(at, rt);
1189 or_(rd, rs, at);
1190 }
1191 }
1192}
1193
1194
1195void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1196 if (rt.is_reg()) {
1197 xor_(rd, rs, rt.rm());
1198 } else {
1199 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001200 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001201 } else {
1202 // li handles the relocation.
1203 DCHECK(!rs.is(at));
1204 li(at, rt);
1205 xor_(rd, rs, at);
1206 }
1207 }
1208}
1209
1210
1211void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1212 if (rt.is_reg()) {
1213 nor(rd, rs, rt.rm());
1214 } else {
1215 // li handles the relocation.
1216 DCHECK(!rs.is(at));
1217 li(at, rt);
1218 nor(rd, rs, at);
1219 }
1220}
1221
1222
1223void MacroAssembler::Neg(Register rs, const Operand& rt) {
1224 DCHECK(rt.is_reg());
1225 DCHECK(!at.is(rs));
1226 DCHECK(!at.is(rt.rm()));
1227 li(at, -1);
1228 xor_(rs, rt.rm(), at);
1229}
1230
1231
1232void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1233 if (rt.is_reg()) {
1234 slt(rd, rs, rt.rm());
1235 } else {
1236 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001237 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001238 } else {
1239 // li handles the relocation.
1240 DCHECK(!rs.is(at));
1241 li(at, rt);
1242 slt(rd, rs, at);
1243 }
1244 }
1245}
1246
1247
1248void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1249 if (rt.is_reg()) {
1250 sltu(rd, rs, rt.rm());
1251 } else {
1252 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001253 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001254 } else {
1255 // li handles the relocation.
1256 DCHECK(!rs.is(at));
1257 li(at, rt);
1258 sltu(rd, rs, at);
1259 }
1260 }
1261}
1262
1263
1264void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001265 if (rt.is_reg()) {
1266 rotrv(rd, rs, rt.rm());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001267 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01001268 int64_t ror_value = rt.imm64_ % 32;
1269 if (ror_value < 0) {
1270 ror_value += 32;
1271 }
1272 rotr(rd, rs, ror_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001273 }
1274}
1275
1276
1277void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1278 if (rt.is_reg()) {
1279 drotrv(rd, rs, rt.rm());
1280 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01001281 int64_t dror_value = rt.imm64_ % 64;
1282 if (dror_value < 0) dror_value += 64;
1283 if (dror_value <= 31) {
1284 drotr(rd, rs, dror_value);
1285 } else {
1286 drotr32(rd, rs, dror_value - 32);
1287 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001288 }
1289}
1290
1291
1292void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1293 pref(hint, rs);
1294}
1295
1296
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001297void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1298 Register scratch) {
Ben Murdochda12d292016-06-02 14:46:10 +01001299 DCHECK(sa >= 1 && sa <= 31);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001300 if (kArchVariant == kMips64r6 && sa <= 4) {
Ben Murdochda12d292016-06-02 14:46:10 +01001301 lsa(rd, rt, rs, sa - 1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001302 } else {
1303 Register tmp = rd.is(rt) ? scratch : rd;
1304 DCHECK(!tmp.is(rt));
1305 sll(tmp, rs, sa);
1306 Addu(rd, rt, tmp);
1307 }
1308}
1309
1310
1311void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1312 Register scratch) {
Ben Murdochda12d292016-06-02 14:46:10 +01001313 DCHECK(sa >= 1 && sa <= 31);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001314 if (kArchVariant == kMips64r6 && sa <= 4) {
Ben Murdochda12d292016-06-02 14:46:10 +01001315 dlsa(rd, rt, rs, sa - 1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001316 } else {
1317 Register tmp = rd.is(rt) ? scratch : rd;
1318 DCHECK(!tmp.is(rt));
1319 dsll(tmp, rs, sa);
1320 Daddu(rd, rt, tmp);
1321 }
1322}
1323
1324
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001325// ------------Pseudo-instructions-------------
1326
1327void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1328 lwr(rd, rs);
1329 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1330}
1331
1332
1333void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1334 swr(rd, rs);
1335 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1336}
1337
1338
1339// Do 64-bit load from unaligned address. Note this only handles
1340// the specific case of 32-bit aligned, but not 64-bit aligned.
1341void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1342 // Assert fail if the offset from start of object IS actually aligned.
1343 // ONLY use with known misalignment, since there is performance cost.
1344 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001345 if (kArchEndian == kLittle) {
1346 lwu(rd, rs);
1347 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1348 dsll32(scratch, scratch, 0);
1349 } else {
1350 lw(rd, rs);
1351 lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1352 dsll32(rd, rd, 0);
1353 }
1354 Daddu(rd, rd, scratch);
1355}
1356
1357
1358// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
1359// bits,
1360// second word in high bits.
1361void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1362 Register scratch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001363 lwu(rd, rs);
1364 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1365 dsll32(scratch, scratch, 0);
1366 Daddu(rd, rd, scratch);
1367}
1368
1369
1370// Do 64-bit store to unaligned address. Note this only handles
1371// the specific case of 32-bit aligned, but not 64-bit aligned.
1372void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1373 // Assert fail if the offset from start of object IS actually aligned.
1374 // ONLY use with known misalignment, since there is performance cost.
1375 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001376 if (kArchEndian == kLittle) {
1377 sw(rd, rs);
1378 dsrl32(scratch, rd, 0);
1379 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1380 } else {
1381 sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1382 dsrl32(scratch, rd, 0);
1383 sw(scratch, rs);
1384 }
1385}
1386
1387
1388// Do 64-bit store as two consequent 32-bit stores to unaligned address.
1389void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1390 Register scratch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001391 sw(rd, rs);
1392 dsrl32(scratch, rd, 0);
1393 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1394}
1395
1396
1397void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1398 AllowDeferredHandleDereference smi_check;
1399 if (value->IsSmi()) {
1400 li(dst, Operand(value), mode);
1401 } else {
1402 DCHECK(value->IsHeapObject());
1403 if (isolate()->heap()->InNewSpace(*value)) {
1404 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1405 li(dst, Operand(cell));
1406 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1407 } else {
1408 li(dst, Operand(value));
1409 }
1410 }
1411}
1412
Ben Murdoch097c5b22016-05-18 11:27:45 +01001413static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
1414 if ((imm >> (bitnum - 1)) & 0x1) {
1415 imm = (imm >> bitnum) + 1;
1416 } else {
1417 imm = imm >> bitnum;
1418 }
1419 return imm;
1420}
1421
1422bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1423 bool higher_bits_sign_extended = false;
1424 if (is_int16(j.imm64_)) {
1425 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1426 } else if (!(j.imm64_ & kHiMask)) {
1427 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1428 } else if (!(j.imm64_ & kImm16Mask)) {
1429 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1430 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1431 higher_bits_sign_extended = true;
1432 }
1433 } else {
1434 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1435 ori(rd, rd, (j.imm64_ & kImm16Mask));
1436 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1437 higher_bits_sign_extended = true;
1438 }
1439 }
1440 return higher_bits_sign_extended;
1441}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001442
1443void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1444 DCHECK(!j.is_reg());
1445 BlockTrampolinePoolScope block_trampoline_pool(this);
1446 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1447 // Normal load of an immediate value which does not need Relocation Info.
1448 if (is_int32(j.imm64_)) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001449 LiLower32BitHelper(rd, j);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001450 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001451 if (kArchVariant == kMips64r6) {
1452 int64_t imm = j.imm64_;
1453 bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
1454 imm = ShiftAndFixSignExtension(imm, 32);
1455 // If LUI writes 1s to higher bits, we need both DAHI/DATI.
1456 if ((imm & kImm16Mask) ||
1457 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1458 dahi(rd, imm & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001459 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001460 imm = ShiftAndFixSignExtension(imm, 16);
1461 if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
1462 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1463 dati(rd, imm & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001464 }
1465 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001466 if (is_int48(j.imm64_)) {
1467 if ((j.imm64_ >> 32) & kImm16Mask) {
1468 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1469 if ((j.imm64_ >> 16) & kImm16Mask) {
1470 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1471 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001472 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001473 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1474 }
1475 dsll(rd, rd, 16);
1476 if (j.imm64_ & kImm16Mask) {
1477 ori(rd, rd, j.imm64_ & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001478 }
1479 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001480 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1481 if ((j.imm64_ >> 32) & kImm16Mask) {
1482 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1483 }
1484 if ((j.imm64_ >> 16) & kImm16Mask) {
1485 dsll(rd, rd, 16);
1486 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1487 if (j.imm64_ & kImm16Mask) {
1488 dsll(rd, rd, 16);
1489 ori(rd, rd, j.imm64_ & kImm16Mask);
1490 } else {
1491 dsll(rd, rd, 16);
1492 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001493 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001494 if (j.imm64_ & kImm16Mask) {
1495 dsll32(rd, rd, 0);
1496 ori(rd, rd, j.imm64_ & kImm16Mask);
1497 } else {
1498 dsll32(rd, rd, 0);
1499 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001500 }
1501 }
1502 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001503 }
1504 } else if (MustUseReg(j.rmode_)) {
1505 RecordRelocInfo(j.rmode_, j.imm64_);
1506 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1507 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1508 dsll(rd, rd, 16);
1509 ori(rd, rd, j.imm64_ & kImm16Mask);
1510 } else if (mode == ADDRESS_LOAD) {
1511 // We always need the same number of instructions as we may need to patch
1512 // this code to load another value which may need all 4 instructions.
1513 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1514 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1515 dsll(rd, rd, 16);
1516 ori(rd, rd, j.imm64_ & kImm16Mask);
1517 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001518 if (kArchVariant == kMips64r6) {
1519 int64_t imm = j.imm64_;
1520 lui(rd, (imm >> kLuiShift) & kImm16Mask);
1521 if (imm & kImm16Mask) {
1522 ori(rd, rd, (imm & kImm16Mask));
1523 }
1524 if ((imm >> 31) & 0x1) {
1525 imm = (imm >> 32) + 1;
1526 } else {
1527 imm = imm >> 32;
1528 }
1529 dahi(rd, imm & kImm16Mask);
1530 if ((imm >> 15) & 0x1) {
1531 imm = (imm >> 16) + 1;
1532 } else {
1533 imm = imm >> 16;
1534 }
1535 dati(rd, imm & kImm16Mask);
1536 } else {
1537 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1538 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1539 dsll(rd, rd, 16);
1540 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1541 dsll(rd, rd, 16);
1542 ori(rd, rd, j.imm64_ & kImm16Mask);
1543 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001544 }
1545}
1546
1547
1548void MacroAssembler::MultiPush(RegList regs) {
1549 int16_t num_to_push = NumberOfBitsSet(regs);
1550 int16_t stack_offset = num_to_push * kPointerSize;
1551
1552 Dsubu(sp, sp, Operand(stack_offset));
1553 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1554 if ((regs & (1 << i)) != 0) {
1555 stack_offset -= kPointerSize;
1556 sd(ToRegister(i), MemOperand(sp, stack_offset));
1557 }
1558 }
1559}
1560
1561
1562void MacroAssembler::MultiPushReversed(RegList regs) {
1563 int16_t num_to_push = NumberOfBitsSet(regs);
1564 int16_t stack_offset = num_to_push * kPointerSize;
1565
1566 Dsubu(sp, sp, Operand(stack_offset));
1567 for (int16_t i = 0; i < kNumRegisters; i++) {
1568 if ((regs & (1 << i)) != 0) {
1569 stack_offset -= kPointerSize;
1570 sd(ToRegister(i), MemOperand(sp, stack_offset));
1571 }
1572 }
1573}
1574
1575
1576void MacroAssembler::MultiPop(RegList regs) {
1577 int16_t stack_offset = 0;
1578
1579 for (int16_t i = 0; i < kNumRegisters; i++) {
1580 if ((regs & (1 << i)) != 0) {
1581 ld(ToRegister(i), MemOperand(sp, stack_offset));
1582 stack_offset += kPointerSize;
1583 }
1584 }
1585 daddiu(sp, sp, stack_offset);
1586}
1587
1588
1589void MacroAssembler::MultiPopReversed(RegList regs) {
1590 int16_t stack_offset = 0;
1591
1592 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1593 if ((regs & (1 << i)) != 0) {
1594 ld(ToRegister(i), MemOperand(sp, stack_offset));
1595 stack_offset += kPointerSize;
1596 }
1597 }
1598 daddiu(sp, sp, stack_offset);
1599}
1600
1601
1602void MacroAssembler::MultiPushFPU(RegList regs) {
1603 int16_t num_to_push = NumberOfBitsSet(regs);
1604 int16_t stack_offset = num_to_push * kDoubleSize;
1605
1606 Dsubu(sp, sp, Operand(stack_offset));
1607 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1608 if ((regs & (1 << i)) != 0) {
1609 stack_offset -= kDoubleSize;
1610 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1611 }
1612 }
1613}
1614
1615
1616void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1617 int16_t num_to_push = NumberOfBitsSet(regs);
1618 int16_t stack_offset = num_to_push * kDoubleSize;
1619
1620 Dsubu(sp, sp, Operand(stack_offset));
1621 for (int16_t i = 0; i < kNumRegisters; i++) {
1622 if ((regs & (1 << i)) != 0) {
1623 stack_offset -= kDoubleSize;
1624 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1625 }
1626 }
1627}
1628
1629
1630void MacroAssembler::MultiPopFPU(RegList regs) {
1631 int16_t stack_offset = 0;
1632
1633 for (int16_t i = 0; i < kNumRegisters; i++) {
1634 if ((regs & (1 << i)) != 0) {
1635 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1636 stack_offset += kDoubleSize;
1637 }
1638 }
1639 daddiu(sp, sp, stack_offset);
1640}
1641
1642
1643void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1644 int16_t stack_offset = 0;
1645
1646 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1647 if ((regs & (1 << i)) != 0) {
1648 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1649 stack_offset += kDoubleSize;
1650 }
1651 }
1652 daddiu(sp, sp, stack_offset);
1653}
1654
1655
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001656void MacroAssembler::Ext(Register rt,
1657 Register rs,
1658 uint16_t pos,
1659 uint16_t size) {
1660 DCHECK(pos < 32);
1661 DCHECK(pos + size < 33);
1662 ext_(rt, rs, pos, size);
1663}
1664
1665
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001666void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1667 uint16_t size) {
1668 DCHECK(pos < 32);
1669 DCHECK(pos + size < 33);
1670 dext_(rt, rs, pos, size);
1671}
1672
1673
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001674void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
1675 uint16_t size) {
1676 DCHECK(pos < 32);
1677 DCHECK(size <= 64);
1678 dextm(rt, rs, pos, size);
1679}
1680
1681
1682void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
1683 uint16_t size) {
1684 DCHECK(pos >= 32 && pos < 64);
1685 DCHECK(size < 33);
1686 dextu(rt, rs, pos, size);
1687}
1688
1689
1690void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
1691 uint16_t size) {
1692 DCHECK(pos < 32);
1693 DCHECK(pos + size <= 32);
1694 DCHECK(size != 0);
1695 dins_(rt, rs, pos, size);
1696}
1697
1698
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001699void MacroAssembler::Ins(Register rt,
1700 Register rs,
1701 uint16_t pos,
1702 uint16_t size) {
1703 DCHECK(pos < 32);
1704 DCHECK(pos + size <= 32);
1705 DCHECK(size != 0);
1706 ins_(rt, rs, pos, size);
1707}
1708
1709
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001710void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001711 // Move the data from fs to t8.
1712 mfc1(t8, fs);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001713 Cvt_d_uw(fd, t8);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001714}
1715
1716
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001717void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
1718 // Convert rs to a FP value in fd.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001719 DCHECK(!rs.is(t9));
1720 DCHECK(!rs.is(at));
1721
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001722 // Zero extend int32 in rs.
1723 Dext(t9, rs, 0, 32);
1724 dmtc1(t9, fd);
1725 cvt_d_l(fd, fd);
1726}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001727
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001728
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001729void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
1730 // Move the data from fs to t8.
1731 dmfc1(t8, fs);
1732 Cvt_d_ul(fd, t8);
1733}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001734
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001735
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001736void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
1737 // Convert rs to a FP value in fd.
1738
1739 DCHECK(!rs.is(t9));
1740 DCHECK(!rs.is(at));
1741
1742 Label msb_clear, conversion_done;
1743
1744 Branch(&msb_clear, ge, rs, Operand(zero_reg));
1745
1746 // Rs >= 2^63
1747 andi(t9, rs, 1);
1748 dsrl(rs, rs, 1);
1749 or_(t9, t9, rs);
1750 dmtc1(t9, fd);
1751 cvt_d_l(fd, fd);
1752 Branch(USE_DELAY_SLOT, &conversion_done);
1753 add_d(fd, fd, fd); // In delay slot.
1754
1755 bind(&msb_clear);
1756 // Rs < 2^63, we can do simple conversion.
1757 dmtc1(rs, fd);
1758 cvt_d_l(fd, fd);
1759
1760 bind(&conversion_done);
1761}
1762
Ben Murdoch097c5b22016-05-18 11:27:45 +01001763void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
1764 // Move the data from fs to t8.
1765 mfc1(t8, fs);
1766 Cvt_s_uw(fd, t8);
1767}
1768
1769void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
1770 // Convert rs to a FP value in fd.
1771 DCHECK(!rs.is(t9));
1772 DCHECK(!rs.is(at));
1773
1774 // Zero extend int32 in rs.
1775 Dext(t9, rs, 0, 32);
1776 dmtc1(t9, fd);
1777 cvt_s_l(fd, fd);
1778}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001779
1780void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
1781 // Move the data from fs to t8.
1782 dmfc1(t8, fs);
1783 Cvt_s_ul(fd, t8);
1784}
1785
1786
1787void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
1788 // Convert rs to a FP value in fd.
1789
1790 DCHECK(!rs.is(t9));
1791 DCHECK(!rs.is(at));
1792
1793 Label positive, conversion_done;
1794
1795 Branch(&positive, ge, rs, Operand(zero_reg));
1796
1797 // Rs >= 2^31.
1798 andi(t9, rs, 1);
1799 dsrl(rs, rs, 1);
1800 or_(t9, t9, rs);
1801 dmtc1(t9, fd);
1802 cvt_s_l(fd, fd);
1803 Branch(USE_DELAY_SLOT, &conversion_done);
1804 add_s(fd, fd, fd); // In delay slot.
1805
1806 bind(&positive);
1807 // Rs < 2^31, we can do simple conversion.
1808 dmtc1(rs, fd);
1809 cvt_s_l(fd, fd);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001810
1811 bind(&conversion_done);
1812}
1813
1814
1815void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1816 round_l_d(fd, fs);
1817}
1818
1819
1820void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1821 floor_l_d(fd, fs);
1822}
1823
1824
1825void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1826 ceil_l_d(fd, fs);
1827}
1828
1829
1830void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1831 trunc_l_d(fd, fs);
1832}
1833
1834
1835void MacroAssembler::Trunc_l_ud(FPURegister fd,
1836 FPURegister fs,
1837 FPURegister scratch) {
1838 // Load to GPR.
1839 dmfc1(t8, fs);
1840 // Reset sign bit.
1841 li(at, 0x7fffffffffffffff);
1842 and_(t8, t8, at);
1843 dmtc1(t8, fs);
1844 trunc_l_d(fd, fs);
1845}
1846
1847
1848void MacroAssembler::Trunc_uw_d(FPURegister fd,
1849 FPURegister fs,
1850 FPURegister scratch) {
1851 Trunc_uw_d(fs, t8, scratch);
1852 mtc1(t8, fd);
1853}
1854
Ben Murdoch097c5b22016-05-18 11:27:45 +01001855void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
1856 FPURegister scratch) {
1857 Trunc_uw_s(fs, t8, scratch);
1858 mtc1(t8, fd);
1859}
1860
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001861void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
1862 FPURegister scratch, Register result) {
1863 Trunc_ul_d(fs, t8, scratch, result);
1864 dmtc1(t8, fd);
1865}
1866
1867
1868void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
1869 FPURegister scratch, Register result) {
1870 Trunc_ul_s(fs, t8, scratch, result);
1871 dmtc1(t8, fd);
1872}
1873
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001874
1875void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1876 trunc_w_d(fd, fs);
1877}
1878
1879
1880void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1881 round_w_d(fd, fs);
1882}
1883
1884
1885void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1886 floor_w_d(fd, fs);
1887}
1888
1889
1890void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1891 ceil_w_d(fd, fs);
1892}
1893
1894
1895void MacroAssembler::Trunc_uw_d(FPURegister fd,
1896 Register rs,
1897 FPURegister scratch) {
1898 DCHECK(!fd.is(scratch));
1899 DCHECK(!rs.is(at));
1900
 1901  // Load 2^31 into scratch as its double representation.
1902 li(at, 0x41E00000);
1903 mtc1(zero_reg, scratch);
1904 mthc1(at, scratch);
1905 // Test if scratch > fd.
1906 // If fd < 2^31 we can convert it normally.
1907 Label simple_convert;
1908 BranchF(&simple_convert, NULL, lt, fd, scratch);
1909
1910 // First we subtract 2^31 from fd, then trunc it to rs
1911 // and add 2^31 to rs.
1912 sub_d(scratch, fd, scratch);
1913 trunc_w_d(scratch, scratch);
1914 mfc1(rs, scratch);
1915 Or(rs, rs, 1 << 31);
1916
1917 Label done;
1918 Branch(&done);
1919 // Simple conversion.
1920 bind(&simple_convert);
1921 trunc_w_d(scratch, fd);
1922 mfc1(rs, scratch);
1923
1924 bind(&done);
1925}
1926
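// Trunc_uw_d above handles inputs in [2^31, 2^32) by subtracting 2^31 before
// the signed word truncation and ORing bit 31 back into the integer result.
// Roughly, for a hypothetical host-side double d in range (a sketch, not the
// generated code):
//
//   uint32_t u = (d >= 2147483648.0)
//                    ? (static_cast<uint32_t>(d - 2147483648.0) | 0x80000000u)
//                    : static_cast<uint32_t>(d);
//
// The 0x41E00000 constant written to the high word is the upper half of the
// IEEE-754 double representation of 2^31; Trunc_uw_s below does the same with
// the single-precision constant 0x4F000000.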
Ben Murdoch097c5b22016-05-18 11:27:45 +01001927void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
1928 FPURegister scratch) {
1929 DCHECK(!fd.is(scratch));
1930 DCHECK(!rs.is(at));
1931
1932 // Load 2^31 into scratch as its float representation.
1933 li(at, 0x4F000000);
1934 mtc1(at, scratch);
1935 // Test if scratch > fd.
1936 // If fd < 2^31 we can convert it normally.
1937 Label simple_convert;
1938 BranchF32(&simple_convert, NULL, lt, fd, scratch);
1939
1940 // First we subtract 2^31 from fd, then trunc it to rs
1941 // and add 2^31 to rs.
1942 sub_s(scratch, fd, scratch);
1943 trunc_w_s(scratch, scratch);
1944 mfc1(rs, scratch);
1945 Or(rs, rs, 1 << 31);
1946
1947 Label done;
1948 Branch(&done);
1949 // Simple conversion.
1950 bind(&simple_convert);
1951 trunc_w_s(scratch, fd);
1952 mfc1(rs, scratch);
1953
1954 bind(&done);
1955}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001956
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001957void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
1958 FPURegister scratch, Register result) {
1959 DCHECK(!fd.is(scratch));
1960 DCHECK(!AreAliased(rs, result, at));
1961
1962 Label simple_convert, done, fail;
1963 if (result.is_valid()) {
1964 mov(result, zero_reg);
1965 Move(scratch, -1.0);
 1966    // If fd <= -1 or unordered, then the conversion fails.
1967 BranchF(&fail, &fail, le, fd, scratch);
1968 }
1969
1970 // Load 2^63 into scratch as its double representation.
1971 li(at, 0x43e0000000000000);
1972 dmtc1(at, scratch);
1973
1974 // Test if scratch > fd.
1975 // If fd < 2^63 we can convert it normally.
1976 BranchF(&simple_convert, nullptr, lt, fd, scratch);
1977
1978 // First we subtract 2^63 from fd, then trunc it to rs
1979 // and add 2^63 to rs.
1980 sub_d(scratch, fd, scratch);
1981 trunc_l_d(scratch, scratch);
1982 dmfc1(rs, scratch);
1983 Or(rs, rs, Operand(1UL << 63));
1984 Branch(&done);
1985
1986 // Simple conversion.
1987 bind(&simple_convert);
1988 trunc_l_d(scratch, fd);
1989 dmfc1(rs, scratch);
1990
1991 bind(&done);
1992 if (result.is_valid()) {
 1993    // Conversion fails if the result is negative.
1994 addiu(at, zero_reg, -1);
 1995    dsrl(at, at, 1);  // Load 0x7fffffffffffffff (2^63 - 1).
1996 dmfc1(result, scratch);
1997 xor_(result, result, at);
1998 Slt(result, zero_reg, result);
1999 }
2000
2001 bind(&fail);
2002}
2003
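// The success check in Trunc_ul_d above XORs the raw truncation bits with
// 0x7fffffffffffffff and tests whether the outcome is strictly positive: that
// is 1 exactly when the truncated value is non-negative and is not the
// 0x7fffffffffffffff pattern the FPU typically produces on overflow or NaN.
// Roughly, for a hypothetical host-side uint64_t t holding the truncation
// bits (a sketch, not the generated code):
//
//   bool ok = ((t >> 63) == 0) && (t != 0x7fffffffffffffffULL);
//
// Trunc_ul_s below performs the same check on the single-precision path.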
2004
2005void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
2006 FPURegister scratch, Register result) {
2007 DCHECK(!fd.is(scratch));
2008 DCHECK(!AreAliased(rs, result, at));
2009
2010 Label simple_convert, done, fail;
2011 if (result.is_valid()) {
2012 mov(result, zero_reg);
2013 Move(scratch, -1.0f);
 2014    // If fd <= -1 or unordered, then the conversion fails.
2015 BranchF32(&fail, &fail, le, fd, scratch);
2016 }
2017
2018 // Load 2^63 into scratch as its float representation.
2019 li(at, 0x5f000000);
2020 mtc1(at, scratch);
2021
2022 // Test if scratch > fd.
2023 // If fd < 2^63 we can convert it normally.
2024 BranchF32(&simple_convert, nullptr, lt, fd, scratch);
2025
2026 // First we subtract 2^63 from fd, then trunc it to rs
2027 // and add 2^63 to rs.
2028 sub_s(scratch, fd, scratch);
2029 trunc_l_s(scratch, scratch);
2030 dmfc1(rs, scratch);
2031 Or(rs, rs, Operand(1UL << 63));
2032 Branch(&done);
2033
2034 // Simple conversion.
2035 bind(&simple_convert);
2036 trunc_l_s(scratch, fd);
2037 dmfc1(rs, scratch);
2038
2039 bind(&done);
2040 if (result.is_valid()) {
 2041    // Conversion fails if the result is negative or unordered.
2042 addiu(at, zero_reg, -1);
 2043    dsrl(at, at, 1);  // Load 0x7fffffffffffffff (2^63 - 1).
2044 dmfc1(result, scratch);
2045 xor_(result, result, at);
2046 Slt(result, zero_reg, result);
2047 }
2048
2049 bind(&fail);
2050}
2051
2052
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002053void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2054 FPURegister ft, FPURegister scratch) {
2055 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
2056 madd_d(fd, fr, fs, ft);
2057 } else {
 2058    // Cannot change the source registers' values.
2059 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2060 mul_d(scratch, fs, ft);
2061 add_d(fd, fr, scratch);
2062 }
2063}
2064
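// With the fused madd_d path disabled above, Madd_d computes fd = fr + fs * ft
// as two separately rounded operations:
//
//   fd = round(round(fs * ft) + fr)
//
// whereas a true fused madd_d rounds only once, so results may differ in the
// last bit between the two paths.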
2065
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002066void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2067 Label* nan, Condition cond, FPURegister cmp1,
2068 FPURegister cmp2, BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002069 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002070 if (cond == al) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002071 Branch(bd, target);
2072 return;
2073 }
2074
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002075 if (kArchVariant == kMips64r6) {
2076 sizeField = sizeField == D ? L : W;
2077 }
2078
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002079 DCHECK(nan || target);
2080 // Check for unordered (NaN) cases.
2081 if (nan) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002082 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002083 if (kArchVariant != kMips64r6) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002084 if (long_branch) {
2085 Label skip;
2086 c(UN, sizeField, cmp1, cmp2);
2087 bc1f(&skip);
2088 nop();
2089 BranchLong(nan, bd);
2090 bind(&skip);
2091 } else {
2092 c(UN, sizeField, cmp1, cmp2);
2093 bc1t(nan);
2094 if (bd == PROTECT) {
2095 nop();
2096 }
2097 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002098 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002099 // Use kDoubleCompareReg for comparison result. It has to be unavailable
 2100      // to the Lithium
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002101 // register allocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002102 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2103 if (long_branch) {
2104 Label skip;
2105 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2106 bc1eqz(&skip, kDoubleCompareReg);
2107 nop();
2108 BranchLong(nan, bd);
2109 bind(&skip);
2110 } else {
2111 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2112 bc1nez(nan, kDoubleCompareReg);
2113 if (bd == PROTECT) {
2114 nop();
2115 }
2116 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002117 }
2118 }
2119
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002120 if (target) {
2121 bool long_branch =
2122 target->is_bound() ? is_near(target) : is_trampoline_emitted();
2123 if (long_branch) {
2124 Label skip;
2125 Condition neg_cond = NegateFpuCondition(cond);
2126 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2127 BranchLong(target, bd);
2128 bind(&skip);
2129 } else {
2130 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2131 }
2132 }
2133}
2134
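// BranchFCommon above first filters out unordered (NaN) operands with a UN
// compare, so the condition test that follows can assume ordered inputs. When
// the target is too far for a short FP branch (or a trampoline has already
// been emitted), the condition is inverted and a short branch hops over an
// unconditional long branch; roughly, the emitted shape is:
//
//   BranchShortF(NegateFpuCondition(cond), &skip);  // taken when cond fails
//   BranchLong(target);                             // reached when cond holds
//   bind(&skip);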
2135
2136void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2137 Condition cc, FPURegister cmp1,
2138 FPURegister cmp2, BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002139 if (kArchVariant != kMips64r6) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002140 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002141 if (target) {
2142 // Here NaN cases were either handled by this function or are assumed to
2143 // have been handled by the caller.
2144 switch (cc) {
2145 case lt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002146 c(OLT, sizeField, cmp1, cmp2);
2147 bc1t(target);
2148 break;
2149 case ult:
2150 c(ULT, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002151 bc1t(target);
2152 break;
2153 case gt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002154 c(ULE, sizeField, cmp1, cmp2);
2155 bc1f(target);
2156 break;
2157 case ugt:
2158 c(OLE, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002159 bc1f(target);
2160 break;
2161 case ge:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002162 c(ULT, sizeField, cmp1, cmp2);
2163 bc1f(target);
2164 break;
2165 case uge:
2166 c(OLT, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002167 bc1f(target);
2168 break;
2169 case le:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002170 c(OLE, sizeField, cmp1, cmp2);
2171 bc1t(target);
2172 break;
2173 case ule:
2174 c(ULE, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002175 bc1t(target);
2176 break;
2177 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002178 c(EQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002179 bc1t(target);
2180 break;
2181 case ueq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002182 c(UEQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002183 bc1t(target);
2184 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002185 case ne: // Unordered or not equal.
2186 c(EQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002187 bc1f(target);
2188 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002189 case ogl:
2190 c(UEQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002191 bc1f(target);
2192 break;
2193 default:
2194 CHECK(0);
2195 }
2196 }
2197 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002198 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002199 if (target) {
2200 // Here NaN cases were either handled by this function or are assumed to
2201 // have been handled by the caller.
2202 // Unsigned conditions are treated as their signed counterpart.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002203      // Use kDoubleCompareReg for the comparison result; it is valid in fp64
 2204      // (FR = 1) mode.
2205 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002206 switch (cc) {
2207 case lt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002208 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2209 bc1nez(target, kDoubleCompareReg);
2210 break;
2211 case ult:
2212 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2213 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002214 break;
2215 case gt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002216 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2217 bc1eqz(target, kDoubleCompareReg);
2218 break;
2219 case ugt:
2220 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2221 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002222 break;
2223 case ge:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002224 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2225 bc1eqz(target, kDoubleCompareReg);
2226 break;
2227 case uge:
2228 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2229 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002230 break;
2231 case le:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002232 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2233 bc1nez(target, kDoubleCompareReg);
2234 break;
2235 case ule:
2236 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2237 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002238 break;
2239 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002240 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2241 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002242 break;
2243 case ueq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002244 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2245 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002246 break;
2247 case ne:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002248 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2249 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002250 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002251 case ogl:
2252 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2253 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002254 break;
2255 default:
2256 CHECK(0);
2257 }
2258 }
2259 }
2260
2261 if (bd == PROTECT) {
2262 nop();
2263 }
2264}
2265
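// In the pre-r6 path of BranchShortF above, each condition maps onto one FPU
// compare plus a bc1t/bc1f, with the "greater" family synthesized from the
// negated compare so that NaN inputs fall through as not-taken. For example:
//
//   lt -> c(OLT) + bc1t      gt -> c(ULE) + bc1f
//   le -> c(OLE) + bc1t      ge -> c(ULT) + bc1f
//
// On r6 the same mapping is expressed with cmp + bc1nez/bc1eqz through
// kDoubleCompareReg.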
2266
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002267void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2268 DCHECK(!src_low.is(at));
2269 mfhc1(at, dst);
2270 mtc1(src_low, dst);
2271 mthc1(at, dst);
2272}
2273
2274
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002275void MacroAssembler::Move(FPURegister dst, float imm) {
2276 li(at, Operand(bit_cast<int32_t>(imm)));
2277 mtc1(at, dst);
2278}
2279
2280
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002281void MacroAssembler::Move(FPURegister dst, double imm) {
2282 static const DoubleRepresentation minus_zero(-0.0);
2283 static const DoubleRepresentation zero(0.0);
2284 DoubleRepresentation value_rep(imm);
2285 // Handle special values first.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002286 if (value_rep == zero && has_double_zero_reg_set_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002287 mov_d(dst, kDoubleRegZero);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002288 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002289 neg_d(dst, kDoubleRegZero);
2290 } else {
2291 uint32_t lo, hi;
2292 DoubleAsTwoUInt32(imm, &lo, &hi);
2293 // Move the low part of the double into the lower bits of the corresponding
2294 // FPU register.
2295 if (lo != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002296 if (!(lo & kImm16Mask)) {
2297 lui(at, (lo >> kLuiShift) & kImm16Mask);
2298 mtc1(at, dst);
2299 } else if (!(lo & kHiMask)) {
2300 ori(at, zero_reg, lo & kImm16Mask);
2301 mtc1(at, dst);
2302 } else {
2303 lui(at, (lo >> kLuiShift) & kImm16Mask);
2304 ori(at, at, lo & kImm16Mask);
2305 mtc1(at, dst);
2306 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002307 } else {
2308 mtc1(zero_reg, dst);
2309 }
2310 // Move the high part of the double into the high bits of the corresponding
2311 // FPU register.
2312 if (hi != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002313 if (!(hi & kImm16Mask)) {
2314 lui(at, (hi >> kLuiShift) & kImm16Mask);
2315 mthc1(at, dst);
2316 } else if (!(hi & kHiMask)) {
2317 ori(at, zero_reg, hi & kImm16Mask);
2318 mthc1(at, dst);
2319 } else {
2320 lui(at, (hi >> kLuiShift) & kImm16Mask);
2321 ori(at, at, hi & kImm16Mask);
2322 mthc1(at, dst);
2323 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002324 } else {
2325 mthc1(zero_reg, dst);
2326 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002327 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002328 }
2329}
2330
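// Move(FPURegister, double) above splits the IEEE-754 bit pattern of the
// immediate into 32-bit halves and materializes each half with as few
// instructions as its shape allows (lui only, ori only, or lui + ori) before
// writing it with mtc1 / mthc1. Roughly, for a hypothetical double imm (a
// sketch, not the generated code):
//
//   uint64_t bits = bit_cast<uint64_t>(imm);
//   uint32_t lo = static_cast<uint32_t>(bits);        // written via mtc1
//   uint32_t hi = static_cast<uint32_t>(bits >> 32);  // written via mthc1
//
// +0.0 and -0.0 are special-cased through kDoubleRegZero once it has been
// initialized.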
2331
2332void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2333 if (kArchVariant == kMips64r6) {
2334 Label done;
2335 Branch(&done, ne, rt, Operand(zero_reg));
2336 mov(rd, rs);
2337 bind(&done);
2338 } else {
2339 movz(rd, rs, rt);
2340 }
2341}
2342
2343
2344void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2345 if (kArchVariant == kMips64r6) {
2346 Label done;
2347 Branch(&done, eq, rt, Operand(zero_reg));
2348 mov(rd, rs);
2349 bind(&done);
2350 } else {
2351 movn(rd, rs, rt);
2352 }
2353}
2354
2355
2356void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2357 movt(rd, rs, cc);
2358}
2359
2360
2361void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2362 movf(rd, rs, cc);
2363}
2364
Ben Murdochda12d292016-06-02 14:46:10 +01002365#define __ masm->
2366
2367static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2368 FPURegister src1, FPURegister src2, Label* equal) {
2369 if (src1.is(src2)) {
2370 __ Move(dst, src1);
2371 return true;
2372 }
2373
2374 Label other, compare_not_equal;
2375 FPURegister left, right;
2376 if (kind == MaxMinKind::kMin) {
2377 left = src1;
2378 right = src2;
2379 } else {
2380 left = src2;
2381 right = src1;
2382 }
2383
2384 __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
 2385  // Left and right hand sides are equal; check for -0 vs. +0.
2386 __ dmfc1(t8, src1);
2387 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2388 __ Move_d(dst, right);
2389 __ Branch(equal);
2390 __ bind(&other);
2391 __ Move_d(dst, left);
2392 __ Branch(equal);
2393 __ bind(&compare_not_equal);
2394 return false;
2395}
2396
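// ZeroHelper_d above covers the one case an ordinary compare cannot settle:
// when the operands are numerically equal, the interesting difference is
// -0.0 versus +0.0 (min should prefer -0.0, max should prefer +0.0). Since
// -0.0 == +0.0 compares equal, the helper inspects the raw sign bit of src1
// (the 0x8000000000000000 pattern) to pick the operand; ZeroHelper_s below
// does the same for single precision after shifting the 32-bit payload into
// the upper word.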
2397static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2398 FPURegister src1, FPURegister src2, Label* equal) {
2399 if (src1.is(src2)) {
2400 __ Move(dst, src1);
2401 return true;
2402 }
2403
2404 Label other, compare_not_equal;
2405 FPURegister left, right;
2406 if (kind == MaxMinKind::kMin) {
2407 left = src1;
2408 right = src2;
2409 } else {
2410 left = src2;
2411 right = src1;
2412 }
2413
2414 __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
 2415  // Left and right hand sides are equal; check for -0 vs. +0.
2416 __ FmoveLow(t8, src1);
2417 __ dsll32(t8, t8, 0);
2418 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2419 __ Move_s(dst, right);
2420 __ Branch(equal);
2421 __ bind(&other);
2422 __ Move_s(dst, left);
2423 __ Branch(equal);
2424 __ bind(&compare_not_equal);
2425 return false;
2426}
2427
2428#undef __
2429
2430void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
2431 FPURegister src2, Label* nan) {
2432 if (nan) {
2433 BranchF64(nullptr, nan, eq, src1, src2);
2434 }
2435 if (kArchVariant >= kMips64r6) {
2436 min_d(dst, src1, src2);
2437 } else {
2438 Label skip;
2439 if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2440 if (dst.is(src1)) {
2441 BranchF64(&skip, nullptr, le, src1, src2);
2442 Move_d(dst, src2);
2443 } else if (dst.is(src2)) {
2444 BranchF64(&skip, nullptr, ge, src1, src2);
2445 Move_d(dst, src1);
2446 } else {
2447 Label right;
2448 BranchF64(&right, nullptr, gt, src1, src2);
2449 Move_d(dst, src1);
2450 Branch(&skip);
2451 bind(&right);
2452 Move_d(dst, src2);
2453 }
2454 }
2455 bind(&skip);
2456 }
2457}
2458
2459void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
2460 FPURegister src2, Label* nan) {
2461 if (nan) {
2462 BranchF64(nullptr, nan, eq, src1, src2);
2463 }
2464 if (kArchVariant >= kMips64r6) {
2465 max_d(dst, src1, src2);
2466 } else {
2467 Label skip;
2468 if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2469 if (dst.is(src1)) {
2470 BranchF64(&skip, nullptr, ge, src1, src2);
2471 Move_d(dst, src2);
2472 } else if (dst.is(src2)) {
2473 BranchF64(&skip, nullptr, le, src1, src2);
2474 Move_d(dst, src1);
2475 } else {
2476 Label right;
2477 BranchF64(&right, nullptr, lt, src1, src2);
2478 Move_d(dst, src1);
2479 Branch(&skip);
2480 bind(&right);
2481 Move_d(dst, src2);
2482 }
2483 }
2484 bind(&skip);
2485 }
2486}
2487
2488void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
2489 FPURegister src2, Label* nan) {
2490 if (nan) {
2491 BranchF32(nullptr, nan, eq, src1, src2);
2492 }
2493 if (kArchVariant >= kMips64r6) {
2494 min_s(dst, src1, src2);
2495 } else {
2496 Label skip;
2497 if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2498 if (dst.is(src1)) {
2499 BranchF32(&skip, nullptr, le, src1, src2);
2500 Move_s(dst, src2);
2501 } else if (dst.is(src2)) {
2502 BranchF32(&skip, nullptr, ge, src1, src2);
2503 Move_s(dst, src1);
2504 } else {
2505 Label right;
2506 BranchF32(&right, nullptr, gt, src1, src2);
2507 Move_s(dst, src1);
2508 Branch(&skip);
2509 bind(&right);
2510 Move_s(dst, src2);
2511 }
2512 }
2513 bind(&skip);
2514 }
2515}
2516
2517void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
2518 FPURegister src2, Label* nan) {
2519 if (nan) {
2520 BranchF32(nullptr, nan, eq, src1, src2);
2521 }
2522 if (kArchVariant >= kMips64r6) {
2523 max_s(dst, src1, src2);
2524 } else {
2525 Label skip;
2526 if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2527 if (dst.is(src1)) {
2528 BranchF32(&skip, nullptr, ge, src1, src2);
2529 Move_s(dst, src2);
2530 } else if (dst.is(src2)) {
2531 BranchF32(&skip, nullptr, le, src1, src2);
2532 Move_s(dst, src1);
2533 } else {
2534 Label right;
2535 BranchF32(&right, nullptr, lt, src1, src2);
2536 Move_s(dst, src1);
2537 Branch(&skip);
2538 bind(&right);
2539 Move_s(dst, src2);
2540 }
2541 }
2542 bind(&skip);
2543 }
2544}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002545
2546void MacroAssembler::Clz(Register rd, Register rs) {
2547 clz(rd, rs);
2548}
2549
2550
2551void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2552 Register result,
2553 DoubleRegister double_input,
2554 Register scratch,
2555 DoubleRegister double_scratch,
2556 Register except_flag,
2557 CheckForInexactConversion check_inexact) {
2558 DCHECK(!result.is(scratch));
2559 DCHECK(!double_input.is(double_scratch));
2560 DCHECK(!except_flag.is(scratch));
2561
2562 Label done;
2563
2564 // Clear the except flag (0 = no exception)
2565 mov(except_flag, zero_reg);
2566
2567 // Test for values that can be exactly represented as a signed 32-bit integer.
2568 cvt_w_d(double_scratch, double_input);
2569 mfc1(result, double_scratch);
2570 cvt_d_w(double_scratch, double_scratch);
2571 BranchF(&done, NULL, eq, double_input, double_scratch);
2572
2573 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
2574
2575 if (check_inexact == kDontCheckForInexactConversion) {
2576 // Ignore inexact exceptions.
2577 except_mask &= ~kFCSRInexactFlagMask;
2578 }
2579
2580 // Save FCSR.
2581 cfc1(scratch, FCSR);
2582 // Disable FPU exceptions.
2583 ctc1(zero_reg, FCSR);
2584
2585 // Do operation based on rounding mode.
2586 switch (rounding_mode) {
2587 case kRoundToNearest:
2588 Round_w_d(double_scratch, double_input);
2589 break;
2590 case kRoundToZero:
2591 Trunc_w_d(double_scratch, double_input);
2592 break;
2593 case kRoundToPlusInf:
2594 Ceil_w_d(double_scratch, double_input);
2595 break;
2596 case kRoundToMinusInf:
2597 Floor_w_d(double_scratch, double_input);
2598 break;
2599 } // End of switch-statement.
2600
2601 // Retrieve FCSR.
2602 cfc1(except_flag, FCSR);
2603 // Restore FCSR.
2604 ctc1(scratch, FCSR);
2605 // Move the converted value into the result register.
2606 mfc1(result, double_scratch);
2607
2608 // Check for fpu exceptions.
2609 And(except_flag, except_flag, Operand(except_mask));
2610
2611 bind(&done);
2612}
2613
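// EmitFPUTruncate above starts with a cheap exactness test: the input is
// converted to a 32-bit word and back, and if the round trip reproduces the
// original double the result is already correct and except_flag stays 0.
// Only otherwise does it clear the FCSR, perform the requested rounding, and
// return the accumulated exception bits (masked by except_mask) in
// except_flag, so callers can treat except_flag == 0 as a successful
// conversion.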
2614
2615void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2616 DoubleRegister double_input,
2617 Label* done) {
2618 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2619 Register scratch = at;
2620 Register scratch2 = t9;
2621
2622 // Clear cumulative exception flags and save the FCSR.
2623 cfc1(scratch2, FCSR);
2624 ctc1(zero_reg, FCSR);
2625 // Try a conversion to a signed integer.
2626 trunc_w_d(single_scratch, double_input);
2627 mfc1(result, single_scratch);
2628 // Retrieve and restore the FCSR.
2629 cfc1(scratch, FCSR);
2630 ctc1(scratch2, FCSR);
2631 // Check for overflow and NaNs.
2632 And(scratch,
2633 scratch,
2634 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2635 // If we had no exceptions we are done.
2636 Branch(done, eq, scratch, Operand(zero_reg));
2637}
2638
2639
2640void MacroAssembler::TruncateDoubleToI(Register result,
2641 DoubleRegister double_input) {
2642 Label done;
2643
2644 TryInlineTruncateDoubleToI(result, double_input, &done);
2645
 2646  // If we fell through, the inline version didn't succeed, so call the stub instead.
2647 push(ra);
2648 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2649 sdc1(double_input, MemOperand(sp, 0));
2650
2651 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2652 CallStub(&stub);
2653
2654 Daddu(sp, sp, Operand(kDoubleSize));
2655 pop(ra);
2656
2657 bind(&done);
2658}
2659
2660
2661void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2662 Label done;
2663 DoubleRegister double_scratch = f12;
2664 DCHECK(!result.is(object));
2665
2666 ldc1(double_scratch,
2667 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2668 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2669
 2670  // If we fell through, the inline version didn't succeed, so call the stub instead.
2671 push(ra);
2672 DoubleToIStub stub(isolate(),
2673 object,
2674 result,
2675 HeapNumber::kValueOffset - kHeapObjectTag,
2676 true,
2677 true);
2678 CallStub(&stub);
2679 pop(ra);
2680
2681 bind(&done);
2682}
2683
2684
2685void MacroAssembler::TruncateNumberToI(Register object,
2686 Register result,
2687 Register heap_number_map,
2688 Register scratch,
2689 Label* not_number) {
2690 Label done;
2691 DCHECK(!result.is(object));
2692
2693 UntagAndJumpIfSmi(result, object, &done);
2694 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2695 TruncateHeapNumberToI(result, object);
2696
2697 bind(&done);
2698}
2699
2700
2701void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2702 Register src,
2703 int num_least_bits) {
2704 // Ext(dst, src, kSmiTagSize, num_least_bits);
2705 SmiUntag(dst, src);
2706 And(dst, dst, Operand((1 << num_least_bits) - 1));
2707}
2708
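// GetLeastBitsFromSmi above is just untag-then-mask: with num_least_bits == 3,
// for instance, the mask (1 << 3) - 1 == 0x7 leaves the untagged value
// modulo 8 in dst.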
2709
2710void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2711 Register src,
2712 int num_least_bits) {
2713 DCHECK(!src.is(dst));
2714 And(dst, src, Operand((1 << num_least_bits) - 1));
2715}
2716
2717
 2718// Emulated conditional branches do not emit a nop in the branch delay slot.
2719//
2720// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2721#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2722 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2723 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2724
2725
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002726void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2727 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002728 BranchShort(offset, bdslot);
2729}
2730
2731
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002732void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2733 const Operand& rt, BranchDelaySlot bdslot) {
2734 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2735 DCHECK(is_near);
2736 USE(is_near);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002737}
2738
2739
2740void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2741 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002742 if (is_near_branch(L)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002743 BranchShort(L, bdslot);
2744 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002745 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002746 }
2747 } else {
2748 if (is_trampoline_emitted()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002749 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002750 } else {
2751 BranchShort(L, bdslot);
2752 }
2753 }
2754}
2755
2756
2757void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2758 const Operand& rt,
2759 BranchDelaySlot bdslot) {
2760 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002761 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002762 if (cond != cc_always) {
2763 Label skip;
2764 Condition neg_cond = NegateCondition(cond);
2765 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002766 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002767 bind(&skip);
2768 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002769 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002770 }
2771 }
2772 } else {
2773 if (is_trampoline_emitted()) {
2774 if (cond != cc_always) {
2775 Label skip;
2776 Condition neg_cond = NegateCondition(cond);
2777 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002778 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002779 bind(&skip);
2780 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002781 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002782 }
2783 } else {
2784 BranchShort(L, cond, rs, rt, bdslot);
2785 }
2786 }
2787}
2788
2789
2790void MacroAssembler::Branch(Label* L,
2791 Condition cond,
2792 Register rs,
2793 Heap::RootListIndex index,
2794 BranchDelaySlot bdslot) {
2795 LoadRoot(at, index);
2796 Branch(L, cond, rs, Operand(at), bdslot);
2797}
2798
2799
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002800void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2801 BranchDelaySlot bdslot) {
2802 DCHECK(L == nullptr || offset == 0);
2803 offset = GetOffset(offset, L, OffsetSize::kOffset16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002804 b(offset);
2805
2806 // Emit a nop in the branch delay slot if required.
2807 if (bdslot == PROTECT)
2808 nop();
2809}
2810
2811
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002812void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2813 DCHECK(L == nullptr || offset == 0);
2814 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2815 bc(offset);
2816}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002817
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002818
2819void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2820 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2821 DCHECK(is_int26(offset));
2822 BranchShortHelperR6(offset, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002823 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002824 DCHECK(is_int16(offset));
2825 BranchShortHelper(offset, nullptr, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002826 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002827}
2828
2829
2830void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002831 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2832 BranchShortHelperR6(0, L);
2833 } else {
2834 BranchShortHelper(0, L, bdslot);
2835 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002836}
2837
2838
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002839static inline bool IsZero(const Operand& rt) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002840 if (rt.is_reg()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002841 return rt.rm().is(zero_reg);
2842 } else {
2843 return rt.immediate() == 0;
2844 }
2845}
2846
2847
2848int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2849 if (L) {
2850 offset = branch_offset_helper(L, bits) >> 2;
2851 } else {
2852 DCHECK(is_intn(offset, bits));
2853 }
2854 return offset;
2855}
2856
2857
2858Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2859 Register scratch) {
2860 Register r2 = no_reg;
2861 if (rt.is_reg()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002862 r2 = rt.rm_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002863 } else {
2864 r2 = scratch;
2865 li(r2, rt);
2866 }
2867
2868 return r2;
2869}
2870
2871
2872bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2873 Condition cond, Register rs,
2874 const Operand& rt) {
2875 DCHECK(L == nullptr || offset == 0);
2876 Register scratch = rs.is(at) ? t8 : at;
2877 OffsetSize bits = OffsetSize::kOffset16;
2878
2879 // Be careful to always use shifted_branch_offset only just before the
 2880  // branch instruction, as the location will be remembered for patching the
2881 // target.
2882 {
2883 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002884 switch (cond) {
2885 case cc_always:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002886 bits = OffsetSize::kOffset26;
2887 if (!is_near(L, bits)) return false;
2888 offset = GetOffset(offset, L, bits);
2889 bc(offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002890 break;
2891 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002892 if (rs.code() == rt.rm_.reg_code) {
2893 // Pre R6 beq is used here to make the code patchable. Otherwise bc
 2894          // should be used, which has no condition field and so is not patchable.
2895 bits = OffsetSize::kOffset16;
2896 if (!is_near(L, bits)) return false;
2897 scratch = GetRtAsRegisterHelper(rt, scratch);
2898 offset = GetOffset(offset, L, bits);
2899 beq(rs, scratch, offset);
2900 nop();
2901 } else if (IsZero(rt)) {
2902 bits = OffsetSize::kOffset21;
2903 if (!is_near(L, bits)) return false;
2904 offset = GetOffset(offset, L, bits);
2905 beqzc(rs, offset);
2906 } else {
2907 // We don't want any other register but scratch clobbered.
2908 bits = OffsetSize::kOffset16;
2909 if (!is_near(L, bits)) return false;
2910 scratch = GetRtAsRegisterHelper(rt, scratch);
2911 offset = GetOffset(offset, L, bits);
2912 beqc(rs, scratch, offset);
2913 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002914 break;
2915 case ne:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002916 if (rs.code() == rt.rm_.reg_code) {
2917 // Pre R6 bne is used here to make the code patchable. Otherwise we
2918 // should not generate any instruction.
2919 bits = OffsetSize::kOffset16;
2920 if (!is_near(L, bits)) return false;
2921 scratch = GetRtAsRegisterHelper(rt, scratch);
2922 offset = GetOffset(offset, L, bits);
2923 bne(rs, scratch, offset);
2924 nop();
2925 } else if (IsZero(rt)) {
2926 bits = OffsetSize::kOffset21;
2927 if (!is_near(L, bits)) return false;
2928 offset = GetOffset(offset, L, bits);
2929 bnezc(rs, offset);
2930 } else {
2931 // We don't want any other register but scratch clobbered.
2932 bits = OffsetSize::kOffset16;
2933 if (!is_near(L, bits)) return false;
2934 scratch = GetRtAsRegisterHelper(rt, scratch);
2935 offset = GetOffset(offset, L, bits);
2936 bnec(rs, scratch, offset);
2937 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002938 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002939
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002940 // Signed comparison.
2941 case greater:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002942 // rs > rt
2943 if (rs.code() == rt.rm_.reg_code) {
2944 break; // No code needs to be emitted.
2945 } else if (rs.is(zero_reg)) {
2946 bits = OffsetSize::kOffset16;
2947 if (!is_near(L, bits)) return false;
2948 scratch = GetRtAsRegisterHelper(rt, scratch);
2949 offset = GetOffset(offset, L, bits);
2950 bltzc(scratch, offset);
2951 } else if (IsZero(rt)) {
2952 bits = OffsetSize::kOffset16;
2953 if (!is_near(L, bits)) return false;
2954 offset = GetOffset(offset, L, bits);
2955 bgtzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002956 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002957 bits = OffsetSize::kOffset16;
2958 if (!is_near(L, bits)) return false;
2959 scratch = GetRtAsRegisterHelper(rt, scratch);
2960 DCHECK(!rs.is(scratch));
2961 offset = GetOffset(offset, L, bits);
2962 bltc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002963 }
2964 break;
2965 case greater_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002966 // rs >= rt
2967 if (rs.code() == rt.rm_.reg_code) {
2968 bits = OffsetSize::kOffset26;
2969 if (!is_near(L, bits)) return false;
2970 offset = GetOffset(offset, L, bits);
2971 bc(offset);
2972 } else if (rs.is(zero_reg)) {
2973 bits = OffsetSize::kOffset16;
2974 if (!is_near(L, bits)) return false;
2975 scratch = GetRtAsRegisterHelper(rt, scratch);
2976 offset = GetOffset(offset, L, bits);
2977 blezc(scratch, offset);
2978 } else if (IsZero(rt)) {
2979 bits = OffsetSize::kOffset16;
2980 if (!is_near(L, bits)) return false;
2981 offset = GetOffset(offset, L, bits);
2982 bgezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002983 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002984 bits = OffsetSize::kOffset16;
2985 if (!is_near(L, bits)) return false;
2986 scratch = GetRtAsRegisterHelper(rt, scratch);
2987 DCHECK(!rs.is(scratch));
2988 offset = GetOffset(offset, L, bits);
2989 bgec(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002990 }
2991 break;
2992 case less:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002993 // rs < rt
2994 if (rs.code() == rt.rm_.reg_code) {
2995 break; // No code needs to be emitted.
2996 } else if (rs.is(zero_reg)) {
2997 bits = OffsetSize::kOffset16;
2998 if (!is_near(L, bits)) return false;
2999 scratch = GetRtAsRegisterHelper(rt, scratch);
3000 offset = GetOffset(offset, L, bits);
3001 bgtzc(scratch, offset);
3002 } else if (IsZero(rt)) {
3003 bits = OffsetSize::kOffset16;
3004 if (!is_near(L, bits)) return false;
3005 offset = GetOffset(offset, L, bits);
3006 bltzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003007 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003008 bits = OffsetSize::kOffset16;
3009 if (!is_near(L, bits)) return false;
3010 scratch = GetRtAsRegisterHelper(rt, scratch);
3011 DCHECK(!rs.is(scratch));
3012 offset = GetOffset(offset, L, bits);
3013 bltc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003014 }
3015 break;
3016 case less_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003017 // rs <= rt
3018 if (rs.code() == rt.rm_.reg_code) {
3019 bits = OffsetSize::kOffset26;
3020 if (!is_near(L, bits)) return false;
3021 offset = GetOffset(offset, L, bits);
3022 bc(offset);
3023 } else if (rs.is(zero_reg)) {
3024 bits = OffsetSize::kOffset16;
3025 if (!is_near(L, bits)) return false;
3026 scratch = GetRtAsRegisterHelper(rt, scratch);
3027 offset = GetOffset(offset, L, bits);
3028 bgezc(scratch, offset);
3029 } else if (IsZero(rt)) {
3030 bits = OffsetSize::kOffset16;
3031 if (!is_near(L, bits)) return false;
3032 offset = GetOffset(offset, L, bits);
3033 blezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003034 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003035 bits = OffsetSize::kOffset16;
3036 if (!is_near(L, bits)) return false;
3037 scratch = GetRtAsRegisterHelper(rt, scratch);
3038 DCHECK(!rs.is(scratch));
3039 offset = GetOffset(offset, L, bits);
3040 bgec(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003041 }
3042 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003043
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003044 // Unsigned comparison.
3045 case Ugreater:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003046 // rs > rt
3047 if (rs.code() == rt.rm_.reg_code) {
3048 break; // No code needs to be emitted.
3049 } else if (rs.is(zero_reg)) {
3050 bits = OffsetSize::kOffset21;
3051 if (!is_near(L, bits)) return false;
3052 scratch = GetRtAsRegisterHelper(rt, scratch);
3053 offset = GetOffset(offset, L, bits);
3054 bnezc(scratch, offset);
3055 } else if (IsZero(rt)) {
3056 bits = OffsetSize::kOffset21;
3057 if (!is_near(L, bits)) return false;
3058 offset = GetOffset(offset, L, bits);
3059 bnezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003060 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003061 bits = OffsetSize::kOffset16;
3062 if (!is_near(L, bits)) return false;
3063 scratch = GetRtAsRegisterHelper(rt, scratch);
3064 DCHECK(!rs.is(scratch));
3065 offset = GetOffset(offset, L, bits);
3066 bltuc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003067 }
3068 break;
3069 case Ugreater_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003070 // rs >= rt
3071 if (rs.code() == rt.rm_.reg_code) {
3072 bits = OffsetSize::kOffset26;
3073 if (!is_near(L, bits)) return false;
3074 offset = GetOffset(offset, L, bits);
3075 bc(offset);
3076 } else if (rs.is(zero_reg)) {
3077 bits = OffsetSize::kOffset21;
3078 if (!is_near(L, bits)) return false;
3079 scratch = GetRtAsRegisterHelper(rt, scratch);
3080 offset = GetOffset(offset, L, bits);
3081 beqzc(scratch, offset);
3082 } else if (IsZero(rt)) {
3083 bits = OffsetSize::kOffset26;
3084 if (!is_near(L, bits)) return false;
3085 offset = GetOffset(offset, L, bits);
3086 bc(offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003087 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003088 bits = OffsetSize::kOffset16;
3089 if (!is_near(L, bits)) return false;
3090 scratch = GetRtAsRegisterHelper(rt, scratch);
3091 DCHECK(!rs.is(scratch));
3092 offset = GetOffset(offset, L, bits);
3093 bgeuc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003094 }
3095 break;
3096 case Uless:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003097 // rs < rt
3098 if (rs.code() == rt.rm_.reg_code) {
3099 break; // No code needs to be emitted.
3100 } else if (rs.is(zero_reg)) {
3101 bits = OffsetSize::kOffset21;
3102 if (!is_near(L, bits)) return false;
3103 scratch = GetRtAsRegisterHelper(rt, scratch);
3104 offset = GetOffset(offset, L, bits);
3105 bnezc(scratch, offset);
3106 } else if (IsZero(rt)) {
3107 break; // No code needs to be emitted.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003108 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003109 bits = OffsetSize::kOffset16;
3110 if (!is_near(L, bits)) return false;
3111 scratch = GetRtAsRegisterHelper(rt, scratch);
3112 DCHECK(!rs.is(scratch));
3113 offset = GetOffset(offset, L, bits);
3114 bltuc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003115 }
3116 break;
3117 case Uless_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003118 // rs <= rt
3119 if (rs.code() == rt.rm_.reg_code) {
3120 bits = OffsetSize::kOffset26;
3121 if (!is_near(L, bits)) return false;
3122 offset = GetOffset(offset, L, bits);
3123 bc(offset);
3124 } else if (rs.is(zero_reg)) {
3125 bits = OffsetSize::kOffset26;
3126 if (!is_near(L, bits)) return false;
3127 scratch = GetRtAsRegisterHelper(rt, scratch);
3128 offset = GetOffset(offset, L, bits);
3129 bc(offset);
3130 } else if (IsZero(rt)) {
3131 bits = OffsetSize::kOffset21;
3132 if (!is_near(L, bits)) return false;
3133 offset = GetOffset(offset, L, bits);
3134 beqzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003135 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003136 bits = OffsetSize::kOffset16;
3137 if (!is_near(L, bits)) return false;
3138 scratch = GetRtAsRegisterHelper(rt, scratch);
3139 DCHECK(!rs.is(scratch));
3140 offset = GetOffset(offset, L, bits);
3141 bgeuc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003142 }
3143 break;
3144 default:
3145 UNREACHABLE();
3146 }
3147 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003148 CheckTrampolinePoolQuick(1);
3149 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003150}
3151
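// The r6 helper above picks the offset width per instruction form: bc takes a
// 26-bit offset, beqzc/bnezc take 21 bits, and the compact compare-and-branch
// forms (beqc, bnec, bltc, bgec, bltuc, bgeuc, ...) take 16 bits. Whenever the
// label is out of range for the chosen form, the helper returns false so the
// caller can fall back to an inverted short branch around a long branch.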
3152
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003153bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3154 Register rs, const Operand& rt,
3155 BranchDelaySlot bdslot) {
3156 DCHECK(L == nullptr || offset == 0);
3157 if (!is_near(L, OffsetSize::kOffset16)) return false;
3158
3159 Register scratch = at;
3160 int32_t offset32;
3161
3162 // Be careful to always use shifted_branch_offset only just before the
 3163  // branch instruction, as the location will be remembered for patching the
3164 // target.
3165 {
3166 BlockTrampolinePoolScope block_trampoline_pool(this);
3167 switch (cond) {
3168 case cc_always:
3169 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3170 b(offset32);
3171 break;
3172 case eq:
3173 if (IsZero(rt)) {
3174 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3175 beq(rs, zero_reg, offset32);
3176 } else {
3177 // We don't want any other register but scratch clobbered.
3178 scratch = GetRtAsRegisterHelper(rt, scratch);
3179 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3180 beq(rs, scratch, offset32);
3181 }
3182 break;
3183 case ne:
3184 if (IsZero(rt)) {
3185 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3186 bne(rs, zero_reg, offset32);
3187 } else {
3188 // We don't want any other register but scratch clobbered.
3189 scratch = GetRtAsRegisterHelper(rt, scratch);
3190 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3191 bne(rs, scratch, offset32);
3192 }
3193 break;
3194
3195 // Signed comparison.
3196 case greater:
3197 if (IsZero(rt)) {
3198 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3199 bgtz(rs, offset32);
3200 } else {
3201 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3202 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3203 bne(scratch, zero_reg, offset32);
3204 }
3205 break;
3206 case greater_equal:
3207 if (IsZero(rt)) {
3208 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3209 bgez(rs, offset32);
3210 } else {
3211 Slt(scratch, rs, rt);
3212 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3213 beq(scratch, zero_reg, offset32);
3214 }
3215 break;
3216 case less:
3217 if (IsZero(rt)) {
3218 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3219 bltz(rs, offset32);
3220 } else {
3221 Slt(scratch, rs, rt);
3222 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3223 bne(scratch, zero_reg, offset32);
3224 }
3225 break;
3226 case less_equal:
3227 if (IsZero(rt)) {
3228 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3229 blez(rs, offset32);
3230 } else {
3231 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3232 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3233 beq(scratch, zero_reg, offset32);
3234 }
3235 break;
3236
3237 // Unsigned comparison.
3238 case Ugreater:
3239 if (IsZero(rt)) {
3240 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3241 bne(rs, zero_reg, offset32);
3242 } else {
3243 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3244 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3245 bne(scratch, zero_reg, offset32);
3246 }
3247 break;
3248 case Ugreater_equal:
3249 if (IsZero(rt)) {
3250 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3251 b(offset32);
3252 } else {
3253 Sltu(scratch, rs, rt);
3254 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3255 beq(scratch, zero_reg, offset32);
3256 }
3257 break;
3258 case Uless:
3259 if (IsZero(rt)) {
3260 return true; // No code needs to be emitted.
3261 } else {
3262 Sltu(scratch, rs, rt);
3263 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3264 bne(scratch, zero_reg, offset32);
3265 }
3266 break;
3267 case Uless_equal:
3268 if (IsZero(rt)) {
3269 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3270 beq(rs, zero_reg, offset32);
3271 } else {
3272 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3273 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3274 beq(scratch, zero_reg, offset32);
3275 }
3276 break;
3277 default:
3278 UNREACHABLE();
3279 }
3280 }
3281
3282 // Emit a nop in the branch delay slot if required.
3283 if (bdslot == PROTECT)
3284 nop();
3285
3286 return true;
3287}
3288
3289
3290bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3291 Register rs, const Operand& rt,
3292 BranchDelaySlot bdslot) {
3293 BRANCH_ARGS_CHECK(cond, rs, rt);
3294
3295 if (!L) {
3296 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3297 DCHECK(is_int26(offset));
3298 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3299 } else {
3300 DCHECK(is_int16(offset));
3301 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3302 }
3303 } else {
3304 DCHECK(offset == 0);
3305 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3306 return BranchShortHelperR6(0, L, cond, rs, rt);
3307 } else {
3308 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3309 }
3310 }
3311 return false;
3312}
3313
3314
3315void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3316 const Operand& rt, BranchDelaySlot bdslot) {
3317 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3318}
3319
3320
3321void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3322 const Operand& rt, BranchDelaySlot bdslot) {
3323 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3324}
3325
3326
3327void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003328 BranchAndLinkShort(offset, bdslot);
3329}
3330
3331
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003332void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3333 const Operand& rt, BranchDelaySlot bdslot) {
3334 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3335 DCHECK(is_near);
3336 USE(is_near);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003337}
3338
3339
3340void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3341 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003342 if (is_near_branch(L)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003343 BranchAndLinkShort(L, bdslot);
3344 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003345 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003346 }
3347 } else {
3348 if (is_trampoline_emitted()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003349 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003350 } else {
3351 BranchAndLinkShort(L, bdslot);
3352 }
3353 }
3354}
3355
3356
3357void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3358 const Operand& rt,
3359 BranchDelaySlot bdslot) {
3360 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003361 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003362 Label skip;
3363 Condition neg_cond = NegateCondition(cond);
3364 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003365 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003366 bind(&skip);
3367 }
3368 } else {
3369 if (is_trampoline_emitted()) {
3370 Label skip;
3371 Condition neg_cond = NegateCondition(cond);
3372 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003373 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003374 bind(&skip);
3375 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003376 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003377 }
3378 }
3379}
3380
3381
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003382void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3383 BranchDelaySlot bdslot) {
3384 DCHECK(L == nullptr || offset == 0);
3385 offset = GetOffset(offset, L, OffsetSize::kOffset16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003386 bal(offset);
3387
3388 // Emit a nop in the branch delay slot if required.
3389 if (bdslot == PROTECT)
3390 nop();
3391}
3392
3393
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003394void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3395 DCHECK(L == nullptr || offset == 0);
3396 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3397 balc(offset);
3398}
3399
3400
3401void MacroAssembler::BranchAndLinkShort(int32_t offset,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003402 BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003403 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3404 DCHECK(is_int26(offset));
3405 BranchAndLinkShortHelperR6(offset, nullptr);
3406 } else {
3407 DCHECK(is_int16(offset));
3408 BranchAndLinkShortHelper(offset, nullptr, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003409 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003410}
3411
3412
3413void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003414 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3415 BranchAndLinkShortHelperR6(0, L);
3416 } else {
3417 BranchAndLinkShortHelper(0, L, bdslot);
3418 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003419}
3420
3421
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003422bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3423 Condition cond, Register rs,
3424 const Operand& rt) {
3425 DCHECK(L == nullptr || offset == 0);
3426 Register scratch = rs.is(at) ? t8 : at;
3427 OffsetSize bits = OffsetSize::kOffset16;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003428
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003429 BlockTrampolinePoolScope block_trampoline_pool(this);
3430 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3431 switch (cond) {
3432 case cc_always:
3433 bits = OffsetSize::kOffset26;
3434 if (!is_near(L, bits)) return false;
3435 offset = GetOffset(offset, L, bits);
3436 balc(offset);
3437 break;
3438 case eq:
3439 if (!is_near(L, bits)) return false;
3440 Subu(scratch, rs, rt);
3441 offset = GetOffset(offset, L, bits);
3442 beqzalc(scratch, offset);
3443 break;
3444 case ne:
3445 if (!is_near(L, bits)) return false;
3446 Subu(scratch, rs, rt);
3447 offset = GetOffset(offset, L, bits);
3448 bnezalc(scratch, offset);
3449 break;
3450
3451 // Signed comparison.
3452 case greater:
3453 // rs > rt
3454 if (rs.code() == rt.rm_.reg_code) {
3455 break; // No code needs to be emitted.
3456 } else if (rs.is(zero_reg)) {
3457 if (!is_near(L, bits)) return false;
3458 scratch = GetRtAsRegisterHelper(rt, scratch);
3459 offset = GetOffset(offset, L, bits);
3460 bltzalc(scratch, offset);
3461 } else if (IsZero(rt)) {
3462 if (!is_near(L, bits)) return false;
3463 offset = GetOffset(offset, L, bits);
3464 bgtzalc(rs, offset);
3465 } else {
3466 if (!is_near(L, bits)) return false;
3467 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3468 offset = GetOffset(offset, L, bits);
3469 bnezalc(scratch, offset);
3470 }
3471 break;
3472 case greater_equal:
3473 // rs >= rt
3474 if (rs.code() == rt.rm_.reg_code) {
3475 bits = OffsetSize::kOffset26;
3476 if (!is_near(L, bits)) return false;
3477 offset = GetOffset(offset, L, bits);
3478 balc(offset);
3479 } else if (rs.is(zero_reg)) {
3480 if (!is_near(L, bits)) return false;
3481 scratch = GetRtAsRegisterHelper(rt, scratch);
3482 offset = GetOffset(offset, L, bits);
3483 blezalc(scratch, offset);
3484 } else if (IsZero(rt)) {
3485 if (!is_near(L, bits)) return false;
3486 offset = GetOffset(offset, L, bits);
3487 bgezalc(rs, offset);
3488 } else {
3489 if (!is_near(L, bits)) return false;
3490 Slt(scratch, rs, rt);
3491 offset = GetOffset(offset, L, bits);
3492 beqzalc(scratch, offset);
3493 }
3494 break;
3495 case less:
3496 // rs < rt
3497 if (rs.code() == rt.rm_.reg_code) {
3498 break; // No code needs to be emitted.
3499 } else if (rs.is(zero_reg)) {
3500 if (!is_near(L, bits)) return false;
3501 scratch = GetRtAsRegisterHelper(rt, scratch);
3502 offset = GetOffset(offset, L, bits);
3503 bgtzalc(scratch, offset);
3504 } else if (IsZero(rt)) {
3505 if (!is_near(L, bits)) return false;
3506 offset = GetOffset(offset, L, bits);
3507 bltzalc(rs, offset);
3508 } else {
3509 if (!is_near(L, bits)) return false;
3510 Slt(scratch, rs, rt);
3511 offset = GetOffset(offset, L, bits);
3512 bnezalc(scratch, offset);
3513 }
3514 break;
3515 case less_equal:
 3516      // rs <= rt
3517 if (rs.code() == rt.rm_.reg_code) {
3518 bits = OffsetSize::kOffset26;
3519 if (!is_near(L, bits)) return false;
3520 offset = GetOffset(offset, L, bits);
3521 balc(offset);
3522 } else if (rs.is(zero_reg)) {
3523 if (!is_near(L, bits)) return false;
3524 scratch = GetRtAsRegisterHelper(rt, scratch);
3525 offset = GetOffset(offset, L, bits);
3526 bgezalc(scratch, offset);
3527 } else if (IsZero(rt)) {
3528 if (!is_near(L, bits)) return false;
3529 offset = GetOffset(offset, L, bits);
3530 blezalc(rs, offset);
3531 } else {
3532 if (!is_near(L, bits)) return false;
3533 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3534 offset = GetOffset(offset, L, bits);
3535 beqzalc(scratch, offset);
3536 }
3537 break;
3538
3539
3540 // Unsigned comparison.
3541 case Ugreater:
3542 // rs > r2
3543 if (!is_near(L, bits)) return false;
3544 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3545 offset = GetOffset(offset, L, bits);
3546 bnezalc(scratch, offset);
3547 break;
3548 case Ugreater_equal:
3549      // rs >= rt
3550 if (!is_near(L, bits)) return false;
3551 Sltu(scratch, rs, rt);
3552 offset = GetOffset(offset, L, bits);
3553 beqzalc(scratch, offset);
3554 break;
3555 case Uless:
3556      // rs < rt
3557 if (!is_near(L, bits)) return false;
3558 Sltu(scratch, rs, rt);
3559 offset = GetOffset(offset, L, bits);
3560 bnezalc(scratch, offset);
3561 break;
3562 case Uless_equal:
3563      // rs <= rt
3564 if (!is_near(L, bits)) return false;
3565 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3566 offset = GetOffset(offset, L, bits);
3567 beqzalc(scratch, offset);
3568 break;
3569 default:
3570 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003571 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003572 return true;
3573}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003574
3575
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003576// Pre-r6 we need to use bgezal or bltzal, but they can't be used directly
3577// with the slt instructions. We could use sub or add instead, but we would miss
3578// overflow cases, so we keep slt and add an intermediate third instruction.
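// In each case below, Slt/Sltu leaves 1 in the scratch register when its
// comparison holds and 0 otherwise; addiu(scratch, scratch, -1) maps that to
// 0 or -1, so bgezal (>= 0) links-and-branches when the Slt result was 1 and
// bltzal (< 0) when it was 0.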
3579bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3580 Condition cond, Register rs,
3581 const Operand& rt,
3582 BranchDelaySlot bdslot) {
3583 DCHECK(L == nullptr || offset == 0);
3584 if (!is_near(L, OffsetSize::kOffset16)) return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003585
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003586 Register scratch = t8;
3587 BlockTrampolinePoolScope block_trampoline_pool(this);
3588
3589 switch (cond) {
3590 case cc_always:
3591 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3592 bal(offset);
3593 break;
3594 case eq:
3595 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3596 nop();
3597 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3598 bal(offset);
3599 break;
3600 case ne:
3601 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3602 nop();
3603 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3604 bal(offset);
3605 break;
3606
3607 // Signed comparison.
3608 case greater:
3609 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3610 addiu(scratch, scratch, -1);
3611 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3612 bgezal(scratch, offset);
3613 break;
3614 case greater_equal:
3615 Slt(scratch, rs, rt);
3616 addiu(scratch, scratch, -1);
3617 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3618 bltzal(scratch, offset);
3619 break;
3620 case less:
3621 Slt(scratch, rs, rt);
3622 addiu(scratch, scratch, -1);
3623 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3624 bgezal(scratch, offset);
3625 break;
3626 case less_equal:
3627 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3628 addiu(scratch, scratch, -1);
3629 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3630 bltzal(scratch, offset);
3631 break;
3632
3633 // Unsigned comparison.
3634 case Ugreater:
3635 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3636 addiu(scratch, scratch, -1);
3637 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3638 bgezal(scratch, offset);
3639 break;
3640 case Ugreater_equal:
3641 Sltu(scratch, rs, rt);
3642 addiu(scratch, scratch, -1);
3643 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3644 bltzal(scratch, offset);
3645 break;
3646 case Uless:
3647 Sltu(scratch, rs, rt);
3648 addiu(scratch, scratch, -1);
3649 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3650 bgezal(scratch, offset);
3651 break;
3652 case Uless_equal:
3653 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3654 addiu(scratch, scratch, -1);
3655 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3656 bltzal(scratch, offset);
3657 break;
3658
3659 default:
3660 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003661 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003662
3663 // Emit a nop in the branch delay slot if required.
3664 if (bdslot == PROTECT)
3665 nop();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003666
3667 return true;
3668}
3669
3670
3671bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3672 Condition cond, Register rs,
3673 const Operand& rt,
3674 BranchDelaySlot bdslot) {
3675 BRANCH_ARGS_CHECK(cond, rs, rt);
3676
3677 if (!L) {
3678 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3679 DCHECK(is_int26(offset));
3680 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3681 } else {
3682 DCHECK(is_int16(offset));
3683 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3684 }
3685 } else {
3686 DCHECK(offset == 0);
3687 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3688 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3689 } else {
3690 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3691 }
3692 }
3693 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003694}
3695
3696
3697void MacroAssembler::Jump(Register target,
3698 Condition cond,
3699 Register rs,
3700 const Operand& rt,
3701 BranchDelaySlot bd) {
3702 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochda12d292016-06-02 14:46:10 +01003703 if (kArchVariant == kMips64r6 && bd == PROTECT) {
3704 if (cond == cc_always) {
3705 jic(target, 0);
3706 } else {
3707 BRANCH_ARGS_CHECK(cond, rs, rt);
3708 Branch(2, NegateCondition(cond), rs, rt);
3709 jic(target, 0);
3710 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003711 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01003712 if (cond == cc_always) {
3713 jr(target);
3714 } else {
3715 BRANCH_ARGS_CHECK(cond, rs, rt);
3716 Branch(2, NegateCondition(cond), rs, rt);
3717 jr(target);
3718 }
3719 // Emit a nop in the branch delay slot if required.
3720 if (bd == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003721 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003722}
3723
3724
3725void MacroAssembler::Jump(intptr_t target,
3726 RelocInfo::Mode rmode,
3727 Condition cond,
3728 Register rs,
3729 const Operand& rt,
3730 BranchDelaySlot bd) {
3731 Label skip;
3732 if (cond != cc_always) {
3733 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3734 }
3735 // The first instruction of 'li' may be placed in the delay slot.
3736  // This is not an issue; t9 is expected to be clobbered anyway.
3737 li(t9, Operand(target, rmode));
3738 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3739 bind(&skip);
3740}
3741
3742
3743void MacroAssembler::Jump(Address target,
3744 RelocInfo::Mode rmode,
3745 Condition cond,
3746 Register rs,
3747 const Operand& rt,
3748 BranchDelaySlot bd) {
3749 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3750 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3751}
3752
3753
3754void MacroAssembler::Jump(Handle<Code> code,
3755 RelocInfo::Mode rmode,
3756 Condition cond,
3757 Register rs,
3758 const Operand& rt,
3759 BranchDelaySlot bd) {
3760 DCHECK(RelocInfo::IsCodeTarget(rmode));
3761 AllowDeferredHandleDereference embedding_raw_address;
3762 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3763}
3764
3765
3766int MacroAssembler::CallSize(Register target,
3767 Condition cond,
3768 Register rs,
3769 const Operand& rt,
3770 BranchDelaySlot bd) {
3771 int size = 0;
3772
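  // An unconditional call is a single jump-and-link; a conditional one also
  // needs a short branch over the call for the failing case, which accounts
  // for the difference.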
3773 if (cond == cc_always) {
3774 size += 1;
3775 } else {
3776 size += 3;
3777 }
3778
Ben Murdochda12d292016-06-02 14:46:10 +01003779 if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003780
3781 return size * kInstrSize;
3782}
3783
3784
3785// Note: To call gcc-compiled C code on MIPS, you must call through t9.
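// A typical sequence therefore loads the target into t9 first, e.g.
// li(t9, Operand(target, rmode)) followed by Call(t9, ...), as the
// Call(Address, ...) overload below does.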
3786void MacroAssembler::Call(Register target,
3787 Condition cond,
3788 Register rs,
3789 const Operand& rt,
3790 BranchDelaySlot bd) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003791#ifdef DEBUG
3792 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3793#endif
3794
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003795 BlockTrampolinePoolScope block_trampoline_pool(this);
3796 Label start;
3797 bind(&start);
Ben Murdochda12d292016-06-02 14:46:10 +01003798 if (kArchVariant == kMips64r6 && bd == PROTECT) {
3799 if (cond == cc_always) {
3800 jialc(target, 0);
3801 } else {
3802 BRANCH_ARGS_CHECK(cond, rs, rt);
3803 Branch(2, NegateCondition(cond), rs, rt);
3804 jialc(target, 0);
3805 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003806 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01003807 if (cond == cc_always) {
3808 jalr(target);
3809 } else {
3810 BRANCH_ARGS_CHECK(cond, rs, rt);
3811 Branch(2, NegateCondition(cond), rs, rt);
3812 jalr(target);
3813 }
3814 // Emit a nop in the branch delay slot if required.
3815 if (bd == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003816 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003817
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003818#ifdef DEBUG
3819 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3820 SizeOfCodeGeneratedSince(&start));
3821#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003822}
3823
3824
3825int MacroAssembler::CallSize(Address target,
3826 RelocInfo::Mode rmode,
3827 Condition cond,
3828 Register rs,
3829 const Operand& rt,
3830 BranchDelaySlot bd) {
3831 int size = CallSize(t9, cond, rs, rt, bd);
3832 return size + 4 * kInstrSize;
3833}
3834
3835
3836void MacroAssembler::Call(Address target,
3837 RelocInfo::Mode rmode,
3838 Condition cond,
3839 Register rs,
3840 const Operand& rt,
3841 BranchDelaySlot bd) {
3842 BlockTrampolinePoolScope block_trampoline_pool(this);
3843 Label start;
3844 bind(&start);
3845 int64_t target_int = reinterpret_cast<int64_t>(target);
3846 // Must record previous source positions before the
3847 // li() generates a new code target.
3848 positions_recorder()->WriteRecordedPositions();
3849 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
3850 Call(t9, cond, rs, rt, bd);
3851 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3852 SizeOfCodeGeneratedSince(&start));
3853}
3854
3855
3856int MacroAssembler::CallSize(Handle<Code> code,
3857 RelocInfo::Mode rmode,
3858 TypeFeedbackId ast_id,
3859 Condition cond,
3860 Register rs,
3861 const Operand& rt,
3862 BranchDelaySlot bd) {
3863 AllowDeferredHandleDereference using_raw_address;
3864 return CallSize(reinterpret_cast<Address>(code.location()),
3865 rmode, cond, rs, rt, bd);
3866}
3867
3868
3869void MacroAssembler::Call(Handle<Code> code,
3870 RelocInfo::Mode rmode,
3871 TypeFeedbackId ast_id,
3872 Condition cond,
3873 Register rs,
3874 const Operand& rt,
3875 BranchDelaySlot bd) {
3876 BlockTrampolinePoolScope block_trampoline_pool(this);
3877 Label start;
3878 bind(&start);
3879 DCHECK(RelocInfo::IsCodeTarget(rmode));
3880 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3881 SetRecordedAstId(ast_id);
3882 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3883 }
3884 AllowDeferredHandleDereference embedding_raw_address;
3885 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3886 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3887 SizeOfCodeGeneratedSince(&start));
3888}
3889
3890
3891void MacroAssembler::Ret(Condition cond,
3892 Register rs,
3893 const Operand& rt,
3894 BranchDelaySlot bd) {
3895 Jump(ra, cond, rs, rt, bd);
3896}
3897
3898
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003899void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3900 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3901 (!L->is_bound() || is_near_r6(L))) {
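    // On r6, a compact branch (26-bit offset) is used when the label is still
    // unbound or already known to be in range; otherwise fall through to the
    // long jump below.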
3902 BranchShortHelperR6(0, L);
3903 } else {
3904 EmitForbiddenSlotInstruction();
3905 BlockTrampolinePoolScope block_trampoline_pool(this);
3906 {
3907 BlockGrowBufferScope block_buf_growth(this);
3908 // Buffer growth (and relocation) must be blocked for internal references
3909 // until associated instructions are emitted and available to be patched.
3910 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3911 j(L);
3912 }
3913 // Emit a nop in the branch delay slot if required.
3914 if (bdslot == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003915 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003916}
3917
3918
3919void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3920 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3921 (!L->is_bound() || is_near_r6(L))) {
3922 BranchAndLinkShortHelperR6(0, L);
3923 } else {
3924 EmitForbiddenSlotInstruction();
3925 BlockTrampolinePoolScope block_trampoline_pool(this);
3926 {
3927 BlockGrowBufferScope block_buf_growth(this);
3928 // Buffer growth (and relocation) must be blocked for internal references
3929 // until associated instructions are emitted and available to be patched.
3930 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3931 jal(L);
3932 }
3933 // Emit a nop in the branch delay slot if required.
3934 if (bdslot == PROTECT) nop();
3935 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003936}
3937
3938
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003939void MacroAssembler::DropAndRet(int drop) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003940 DCHECK(is_int16(drop * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003941 Ret(USE_DELAY_SLOT);
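  // The daddiu below executes in the return's branch delay slot, so the stack
  // pointer is adjusted before control reaches the caller.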
3942 daddiu(sp, sp, drop * kPointerSize);
3943}
3944
3945void MacroAssembler::DropAndRet(int drop,
3946 Condition cond,
3947 Register r1,
3948 const Operand& r2) {
3949 // Both Drop and Ret need to be conditional.
3950 Label skip;
3951 if (cond != cc_always) {
3952 Branch(&skip, NegateCondition(cond), r1, r2);
3953 }
3954
3955 Drop(drop);
3956 Ret();
3957
3958 if (cond != cc_always) {
3959 bind(&skip);
3960 }
3961}
3962
3963
3964void MacroAssembler::Drop(int count,
3965 Condition cond,
3966 Register reg,
3967 const Operand& op) {
3968 if (count <= 0) {
3969 return;
3970 }
3971
3972 Label skip;
3973
3974 if (cond != al) {
3975 Branch(&skip, NegateCondition(cond), reg, op);
3976 }
3977
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003978 Daddu(sp, sp, Operand(count * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003979
3980 if (cond != al) {
3981 bind(&skip);
3982 }
3983}
3984
3985
3986
3987void MacroAssembler::Swap(Register reg1,
3988 Register reg2,
3989 Register scratch) {
3990 if (scratch.is(no_reg)) {
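    // No scratch register available: use the classic three-XOR swap.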
3991 Xor(reg1, reg1, Operand(reg2));
3992 Xor(reg2, reg2, Operand(reg1));
3993 Xor(reg1, reg1, Operand(reg2));
3994 } else {
3995 mov(scratch, reg1);
3996 mov(reg1, reg2);
3997 mov(reg2, scratch);
3998 }
3999}
4000
4001
4002void MacroAssembler::Call(Label* target) {
4003 BranchAndLink(target);
4004}
4005
4006
4007void MacroAssembler::Push(Handle<Object> handle) {
4008 li(at, Operand(handle));
4009 push(at);
4010}
4011
4012
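// Splits a 64-bit value into two Smi-tagged halves (each 32-bit half shifted
// into the upper word, where Smis are stored on MIPS64) so that a raw value
// can be kept on the stack in GC-safe form; the high half is pushed first and
// PopRegisterAsTwoSmis below reverses the transformation.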
4013void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
4014 DCHECK(!src.is(scratch));
4015 mov(scratch, src);
4016 dsrl32(src, src, 0);
4017 dsll32(src, src, 0);
4018 push(src);
4019 dsll32(scratch, scratch, 0);
4020 push(scratch);
4021}
4022
4023
4024void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
4025 DCHECK(!dst.is(scratch));
4026 pop(scratch);
4027 dsrl32(scratch, scratch, 0);
4028 pop(dst);
4029 dsrl32(dst, dst, 0);
4030 dsll32(dst, dst, 0);
4031 or_(dst, dst, scratch);
4032}
4033
4034
4035void MacroAssembler::DebugBreak() {
4036 PrepareCEntryArgs(0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004037 PrepareCEntryFunction(
4038 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004039 CEntryStub ces(isolate(), 1);
4040 DCHECK(AllowThisStubCall(&ces));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004041 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004042}
4043
4044
4045// ---------------------------------------------------------------------------
4046// Exception handling.
4047
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004048void MacroAssembler::PushStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004049 // Adjust this code if not the case.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004050 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004051 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004052
4053 // Link the current handler as the next handler.
4054 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4055 ld(a5, MemOperand(a6));
4056 push(a5);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004057
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004058 // Set this new handler as the current one.
4059 sd(sp, MemOperand(a6));
4060}
4061
4062
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004063void MacroAssembler::PopStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004064 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4065 pop(a1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004066 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
4067 kPointerSize)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004068 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4069 sd(a1, MemOperand(at));
4070}
4071
4072
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004073void MacroAssembler::Allocate(int object_size,
4074 Register result,
4075 Register scratch1,
4076 Register scratch2,
4077 Label* gc_required,
4078 AllocationFlags flags) {
4079 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4080 if (!FLAG_inline_new) {
4081 if (emit_debug_code()) {
4082 // Trash the registers to simulate an allocation failure.
4083 li(result, 0x7091);
4084 li(scratch1, 0x7191);
4085 li(scratch2, 0x7291);
4086 }
4087 jmp(gc_required);
4088 return;
4089 }
4090
Ben Murdoch097c5b22016-05-18 11:27:45 +01004091 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004092
4093 // Make object size into bytes.
4094 if ((flags & SIZE_IN_WORDS) != 0) {
4095 object_size *= kPointerSize;
4096 }
4097 DCHECK(0 == (object_size & kObjectAlignmentMask));
4098
4099 // Check relative positions of allocation top and limit addresses.
4100 // ARM adds additional checks to make sure the ldm instruction can be
4101  // used. On MIPS we don't have ldm, so no additional checks are needed.
4102 ExternalReference allocation_top =
4103 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4104 ExternalReference allocation_limit =
4105 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4106
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004107 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4108 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004109 DCHECK((limit - top) == kPointerSize);
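  // The limit cell sits one word after the top cell, so a single base register
  // can address both: top at offset 0 and limit at offset kPointerSize.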
4110
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004111 // Set up allocation top address and allocation limit registers.
4112 Register top_address = scratch1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004113 // This code stores a temporary value in t9.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004114 Register alloc_limit = t9;
4115 Register result_end = scratch2;
4116 li(top_address, Operand(allocation_top));
4117
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004118 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004119 // Load allocation top into result and allocation limit into alloc_limit.
4120 ld(result, MemOperand(top_address));
4121 ld(alloc_limit, MemOperand(top_address, kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004122 } else {
4123 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004124 // Assert that result actually contains top on entry.
4125 ld(alloc_limit, MemOperand(top_address));
4126 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004127 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004128 // Load allocation limit. Result already contains allocation top.
4129 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004130 }
4131
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004132 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4133  // the same alignment on MIPS64.
4134 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4135
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004136 if (emit_debug_code()) {
4137 And(at, result, Operand(kDoubleAlignmentMask));
4138 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4139 }
4140
4141 // Calculate new top and bail out if new space is exhausted. Use result
4142 // to calculate the new top.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004143 Daddu(result_end, result, Operand(object_size));
4144 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4145 sd(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004146
4147 // Tag object if requested.
4148 if ((flags & TAG_OBJECT) != 0) {
4149 Daddu(result, result, Operand(kHeapObjectTag));
4150 }
4151}
4152
4153
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004154void MacroAssembler::Allocate(Register object_size, Register result,
4155 Register result_end, Register scratch,
4156 Label* gc_required, AllocationFlags flags) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004157 if (!FLAG_inline_new) {
4158 if (emit_debug_code()) {
4159 // Trash the registers to simulate an allocation failure.
4160 li(result, 0x7091);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004161 li(scratch, 0x7191);
4162 li(result_end, 0x7291);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004163 }
4164 jmp(gc_required);
4165 return;
4166 }
4167
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004168 // |object_size| and |result_end| may overlap, other registers must not.
Ben Murdoch097c5b22016-05-18 11:27:45 +01004169 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4170 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004171
4172 // Check relative positions of allocation top and limit addresses.
4173 // ARM adds additional checks to make sure the ldm instruction can be
4174  // used. On MIPS we don't have ldm, so no additional checks are needed.
4175 ExternalReference allocation_top =
4176 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4177 ExternalReference allocation_limit =
4178 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004179 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4180 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004181 DCHECK((limit - top) == kPointerSize);
4182
4183 // Set up allocation top address and object size registers.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004184 Register top_address = scratch;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004185 // This code stores a temporary value in t9.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004186 Register alloc_limit = t9;
4187 li(top_address, Operand(allocation_top));
4188
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004189 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004190 // Load allocation top into result and allocation limit into alloc_limit.
4191 ld(result, MemOperand(top_address));
4192 ld(alloc_limit, MemOperand(top_address, kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004193 } else {
4194 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004195 // Assert that result actually contains top on entry.
4196 ld(alloc_limit, MemOperand(top_address));
4197 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004198 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004199 // Load allocation limit. Result already contains allocation top.
4200 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004201 }
4202
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004203 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4204  // the same alignment on MIPS64.
4205 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4206
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004207 if (emit_debug_code()) {
4208 And(at, result, Operand(kDoubleAlignmentMask));
4209 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4210 }
4211
4212 // Calculate new top and bail out if new space is exhausted. Use result
4213 // to calculate the new top. Object size may be in words so a shift is
4214 // required to get the number of bytes.
4215 if ((flags & SIZE_IN_WORDS) != 0) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004216 Dlsa(result_end, result, object_size, kPointerSizeLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004217 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004218 Daddu(result_end, result, Operand(object_size));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004219 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004220 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004221
4222 // Update allocation top. result temporarily holds the new top.
4223 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004224 And(at, result_end, Operand(kObjectAlignmentMask));
4225 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004226 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004227 sd(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004228
4229 // Tag object if requested.
4230 if ((flags & TAG_OBJECT) != 0) {
4231 Daddu(result, result, Operand(kHeapObjectTag));
4232 }
4233}
4234
4235
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004236void MacroAssembler::AllocateTwoByteString(Register result,
4237 Register length,
4238 Register scratch1,
4239 Register scratch2,
4240 Register scratch3,
4241 Label* gc_required) {
4242 // Calculate the number of bytes needed for the characters in the string while
4243 // observing object alignment.
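  // In effect: size = (2 * length + SeqTwoByteString::kHeaderSize +
  //                    kObjectAlignmentMask) & ~kObjectAlignmentMask.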
4244 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4245 dsll(scratch1, length, 1); // Length in bytes, not chars.
4246 daddiu(scratch1, scratch1,
4247 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4248 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4249
4250 // Allocate two-byte string in new space.
4251 Allocate(scratch1,
4252 result,
4253 scratch2,
4254 scratch3,
4255 gc_required,
4256 TAG_OBJECT);
4257
4258 // Set the map, length and hash field.
4259 InitializeNewString(result,
4260 length,
4261 Heap::kStringMapRootIndex,
4262 scratch1,
4263 scratch2);
4264}
4265
4266
4267void MacroAssembler::AllocateOneByteString(Register result, Register length,
4268 Register scratch1, Register scratch2,
4269 Register scratch3,
4270 Label* gc_required) {
4271 // Calculate the number of bytes needed for the characters in the string
4272 // while observing object alignment.
4273 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4274 DCHECK(kCharSize == 1);
4275 daddiu(scratch1, length,
4276 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4277 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4278
4279 // Allocate one-byte string in new space.
4280 Allocate(scratch1,
4281 result,
4282 scratch2,
4283 scratch3,
4284 gc_required,
4285 TAG_OBJECT);
4286
4287 // Set the map, length and hash field.
4288 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4289 scratch1, scratch2);
4290}
4291
4292
4293void MacroAssembler::AllocateTwoByteConsString(Register result,
4294 Register length,
4295 Register scratch1,
4296 Register scratch2,
4297 Label* gc_required) {
4298 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4299 TAG_OBJECT);
4300 InitializeNewString(result,
4301 length,
4302 Heap::kConsStringMapRootIndex,
4303 scratch1,
4304 scratch2);
4305}
4306
4307
4308void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4309 Register scratch1,
4310 Register scratch2,
4311 Label* gc_required) {
4312 Allocate(ConsString::kSize,
4313 result,
4314 scratch1,
4315 scratch2,
4316 gc_required,
4317 TAG_OBJECT);
4318
4319 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4320 scratch1, scratch2);
4321}
4322
4323
4324void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4325 Register length,
4326 Register scratch1,
4327 Register scratch2,
4328 Label* gc_required) {
4329 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4330 TAG_OBJECT);
4331
4332 InitializeNewString(result,
4333 length,
4334 Heap::kSlicedStringMapRootIndex,
4335 scratch1,
4336 scratch2);
4337}
4338
4339
4340void MacroAssembler::AllocateOneByteSlicedString(Register result,
4341 Register length,
4342 Register scratch1,
4343 Register scratch2,
4344 Label* gc_required) {
4345 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4346 TAG_OBJECT);
4347
4348 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4349 scratch1, scratch2);
4350}
4351
4352
4353void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4354 Label* not_unique_name) {
4355 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4356 Label succeed;
4357 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4358 Branch(&succeed, eq, at, Operand(zero_reg));
4359 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4360
4361 bind(&succeed);
4362}
4363
4364
4365// Allocates a heap number or jumps to the label if the young space is full and
4366// a scavenge is needed.
4367void MacroAssembler::AllocateHeapNumber(Register result,
4368 Register scratch1,
4369 Register scratch2,
4370 Register heap_number_map,
4371 Label* need_gc,
4372 TaggingMode tagging_mode,
4373 MutableMode mode) {
4374 // Allocate an object in the heap for the heap number and tag it as a heap
4375 // object.
4376 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4377 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
4378
4379 Heap::RootListIndex map_index = mode == MUTABLE
4380 ? Heap::kMutableHeapNumberMapRootIndex
4381 : Heap::kHeapNumberMapRootIndex;
4382 AssertIsRoot(heap_number_map, map_index);
4383
4384 // Store heap number map in the allocated object.
4385 if (tagging_mode == TAG_RESULT) {
4386 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4387 } else {
4388 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
4389 }
4390}
4391
4392
4393void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4394 FPURegister value,
4395 Register scratch1,
4396 Register scratch2,
4397 Label* gc_required) {
4398 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4399 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4400 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4401}
4402
4403
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004404void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4405 Register value, Register scratch1,
4406 Register scratch2, Label* gc_required) {
4407 DCHECK(!result.is(constructor));
4408 DCHECK(!result.is(scratch1));
4409 DCHECK(!result.is(scratch2));
4410 DCHECK(!result.is(value));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004411
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004412 // Allocate JSValue in new space.
4413 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004414
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004415 // Initialize the JSValue.
4416 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4417 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4418 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4419 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4420 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4421 sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4422 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004423}
4424
4425
4426void MacroAssembler::CopyBytes(Register src,
4427 Register dst,
4428 Register length,
4429 Register scratch) {
4430 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
4431
4432 // Align src before copying in word size chunks.
4433 Branch(&byte_loop, le, length, Operand(kPointerSize));
4434 bind(&align_loop_1);
4435 And(scratch, src, kPointerSize - 1);
4436 Branch(&word_loop, eq, scratch, Operand(zero_reg));
4437 lbu(scratch, MemOperand(src));
4438 Daddu(src, src, 1);
4439 sb(scratch, MemOperand(dst));
4440 Daddu(dst, dst, 1);
4441 Dsubu(length, length, Operand(1));
4442 Branch(&align_loop_1, ne, length, Operand(zero_reg));
4443
4444 // Copy bytes in word size chunks.
4445 bind(&word_loop);
4446 if (emit_debug_code()) {
4447 And(scratch, src, kPointerSize - 1);
4448 Assert(eq, kExpectingAlignmentForCopyBytes,
4449 scratch, Operand(zero_reg));
4450 }
4451 Branch(&byte_loop, lt, length, Operand(kPointerSize));
4452 ld(scratch, MemOperand(src));
4453 Daddu(src, src, kPointerSize);
4454
4455 // TODO(kalmard) check if this can be optimized to use sw in most cases.
4456 // Can't use unaligned access - copy byte by byte.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004457 if (kArchEndian == kLittle) {
4458 sb(scratch, MemOperand(dst, 0));
4459 dsrl(scratch, scratch, 8);
4460 sb(scratch, MemOperand(dst, 1));
4461 dsrl(scratch, scratch, 8);
4462 sb(scratch, MemOperand(dst, 2));
4463 dsrl(scratch, scratch, 8);
4464 sb(scratch, MemOperand(dst, 3));
4465 dsrl(scratch, scratch, 8);
4466 sb(scratch, MemOperand(dst, 4));
4467 dsrl(scratch, scratch, 8);
4468 sb(scratch, MemOperand(dst, 5));
4469 dsrl(scratch, scratch, 8);
4470 sb(scratch, MemOperand(dst, 6));
4471 dsrl(scratch, scratch, 8);
4472 sb(scratch, MemOperand(dst, 7));
4473 } else {
4474 sb(scratch, MemOperand(dst, 7));
4475 dsrl(scratch, scratch, 8);
4476 sb(scratch, MemOperand(dst, 6));
4477 dsrl(scratch, scratch, 8);
4478 sb(scratch, MemOperand(dst, 5));
4479 dsrl(scratch, scratch, 8);
4480 sb(scratch, MemOperand(dst, 4));
4481 dsrl(scratch, scratch, 8);
4482 sb(scratch, MemOperand(dst, 3));
4483 dsrl(scratch, scratch, 8);
4484 sb(scratch, MemOperand(dst, 2));
4485 dsrl(scratch, scratch, 8);
4486 sb(scratch, MemOperand(dst, 1));
4487 dsrl(scratch, scratch, 8);
4488 sb(scratch, MemOperand(dst, 0));
4489 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004490 Daddu(dst, dst, 8);
4491
4492 Dsubu(length, length, Operand(kPointerSize));
4493 Branch(&word_loop);
4494
4495 // Copy the last bytes if any left.
4496 bind(&byte_loop);
4497 Branch(&done, eq, length, Operand(zero_reg));
4498 bind(&byte_loop_1);
4499 lbu(scratch, MemOperand(src));
4500 Daddu(src, src, 1);
4501 sb(scratch, MemOperand(dst));
4502 Daddu(dst, dst, 1);
4503 Dsubu(length, length, Operand(1));
4504 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
4505 bind(&done);
4506}
4507
4508
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004509void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4510 Register end_address,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004511 Register filler) {
4512 Label loop, entry;
4513 Branch(&entry);
4514 bind(&loop);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004515 sd(filler, MemOperand(current_address));
4516 Daddu(current_address, current_address, kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004517 bind(&entry);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004518 Branch(&loop, ult, current_address, Operand(end_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004519}
4520
4521
4522void MacroAssembler::CheckFastElements(Register map,
4523 Register scratch,
4524 Label* fail) {
4525 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4526 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4527 STATIC_ASSERT(FAST_ELEMENTS == 2);
4528 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4529 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4530 Branch(fail, hi, scratch,
4531 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4532}
4533
4534
4535void MacroAssembler::CheckFastObjectElements(Register map,
4536 Register scratch,
4537 Label* fail) {
4538 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4539 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4540 STATIC_ASSERT(FAST_ELEMENTS == 2);
4541 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4542 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4543 Branch(fail, ls, scratch,
4544 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4545 Branch(fail, hi, scratch,
4546 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4547}
4548
4549
4550void MacroAssembler::CheckFastSmiElements(Register map,
4551 Register scratch,
4552 Label* fail) {
4553 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4554 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4555 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4556 Branch(fail, hi, scratch,
4557 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4558}
4559
4560
4561void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4562 Register key_reg,
4563 Register elements_reg,
4564 Register scratch1,
4565 Register scratch2,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004566 Label* fail,
4567 int elements_offset) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004568 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
4569 Label smi_value, done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004570
4571 // Handle smi values specially.
4572 JumpIfSmi(value_reg, &smi_value);
4573
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004574 // Ensure that the object is a heap number.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004575 CheckMap(value_reg,
4576 scratch1,
4577 Heap::kHeapNumberMapRootIndex,
4578 fail,
4579 DONT_DO_SMI_CHECK);
4580
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004581  // Double value, turn potential sNaN into qNaN.
4582 DoubleRegister double_result = f0;
4583 DoubleRegister double_scratch = f2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004584
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004585 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4586 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4587 FPUCanonicalizeNaN(double_result, double_result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004588
4589 bind(&smi_value);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004590 // Untag and transfer.
4591 dsrl32(scratch1, value_reg, 0);
4592 mtc1(scratch1, double_scratch);
4593 cvt_d_w(double_result, double_scratch);
4594
4595 bind(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004596 Daddu(scratch1, elements_reg,
4597 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4598 elements_offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004599 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
4600 Daddu(scratch1, scratch1, scratch2);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004601 // scratch1 is now effective address of the double element.
4602 sdc1(double_result, MemOperand(scratch1, 0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004603}
4604
4605
4606void MacroAssembler::CompareMapAndBranch(Register obj,
4607 Register scratch,
4608 Handle<Map> map,
4609 Label* early_success,
4610 Condition cond,
4611 Label* branch_to) {
4612 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4613 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4614}
4615
4616
4617void MacroAssembler::CompareMapAndBranch(Register obj_map,
4618 Handle<Map> map,
4619 Label* early_success,
4620 Condition cond,
4621 Label* branch_to) {
4622 Branch(branch_to, cond, obj_map, Operand(map));
4623}
4624
4625
4626void MacroAssembler::CheckMap(Register obj,
4627 Register scratch,
4628 Handle<Map> map,
4629 Label* fail,
4630 SmiCheckType smi_check_type) {
4631 if (smi_check_type == DO_SMI_CHECK) {
4632 JumpIfSmi(obj, fail);
4633 }
4634 Label success;
4635 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4636 bind(&success);
4637}
4638
4639
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004640void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4641 Register scratch2, Handle<WeakCell> cell,
4642 Handle<Code> success,
4643 SmiCheckType smi_check_type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004644 Label fail;
4645 if (smi_check_type == DO_SMI_CHECK) {
4646 JumpIfSmi(obj, &fail);
4647 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004648 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4649 GetWeakValue(scratch2, cell);
4650 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004651 bind(&fail);
4652}
4653
4654
4655void MacroAssembler::CheckMap(Register obj,
4656 Register scratch,
4657 Heap::RootListIndex index,
4658 Label* fail,
4659 SmiCheckType smi_check_type) {
4660 if (smi_check_type == DO_SMI_CHECK) {
4661 JumpIfSmi(obj, fail);
4662 }
4663 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4664 LoadRoot(at, index);
4665 Branch(fail, ne, scratch, Operand(at));
4666}
4667
4668
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004669void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4670 li(value, Operand(cell));
4671 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4672}
4673
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004674void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4675 const DoubleRegister src) {
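  // Subtracting 0.0 quiets a signalling NaN (the FPU returns a quiet NaN) and
  // leaves every other value, including -0.0, unchanged.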
4676 sub_d(dst, src, kDoubleRegZero);
4677}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004678
4679void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4680 Label* miss) {
4681 GetWeakValue(value, cell);
4682 JumpIfSmi(value, miss);
4683}
4684
4685
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004686void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4687 if (IsMipsSoftFloatABI) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004688 if (kArchEndian == kLittle) {
4689 Move(dst, v0, v1);
4690 } else {
4691 Move(dst, v1, v0);
4692 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004693 } else {
4694 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4695 }
4696}
4697
4698
4699void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4700 if (IsMipsSoftFloatABI) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004701 if (kArchEndian == kLittle) {
4702 Move(dst, a0, a1);
4703 } else {
4704 Move(dst, a1, a0);
4705 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004706 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004707 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004708 }
4709}
4710
4711
4712void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4713 if (!IsMipsSoftFloatABI) {
4714 Move(f12, src);
4715 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004716 if (kArchEndian == kLittle) {
4717 Move(a0, a1, src);
4718 } else {
4719 Move(a1, a0, src);
4720 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004721 }
4722}
4723
4724
4725void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4726 if (!IsMipsSoftFloatABI) {
4727 Move(f0, src);
4728 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004729 if (kArchEndian == kLittle) {
4730 Move(v0, v1, src);
4731 } else {
4732 Move(v1, v0, src);
4733 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004734 }
4735}
4736
4737
4738void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4739 DoubleRegister src2) {
4740 if (!IsMipsSoftFloatABI) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004741 const DoubleRegister fparg2 = f13;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004742 if (src2.is(f12)) {
4743 DCHECK(!src1.is(fparg2));
4744 Move(fparg2, src2);
4745 Move(f12, src1);
4746 } else {
4747 Move(f12, src1);
4748 Move(fparg2, src2);
4749 }
4750 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004751 if (kArchEndian == kLittle) {
4752 Move(a0, a1, src1);
4753 Move(a2, a3, src2);
4754 } else {
4755 Move(a1, a0, src1);
4756 Move(a3, a2, src2);
4757 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004758 }
4759}
4760
4761
4762// -----------------------------------------------------------------------------
4763// JavaScript invokes.
4764
Ben Murdochda12d292016-06-02 14:46:10 +01004765void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4766 Register caller_args_count_reg,
4767 Register scratch0, Register scratch1) {
4768#if DEBUG
4769 if (callee_args_count.is_reg()) {
4770 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4771 scratch1));
4772 } else {
4773 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4774 }
4775#endif
4776
4777 // Calculate the end of destination area where we will put the arguments
4778 // after we drop current frame. We add kPointerSize to count the receiver
4779 // argument which is not included into formal parameters count.
4780 Register dst_reg = scratch0;
4781 Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4782 Daddu(dst_reg, dst_reg,
4783 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4784
4785 Register src_reg = caller_args_count_reg;
4786 // Calculate the end of source area. +kPointerSize is for the receiver.
4787 if (callee_args_count.is_reg()) {
4788 Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4789 Daddu(src_reg, src_reg, Operand(kPointerSize));
4790 } else {
4791 Daddu(src_reg, sp,
4792 Operand((callee_args_count.immediate() + 1) * kPointerSize));
4793 }
4794
4795 if (FLAG_debug_code) {
4796 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
4797 }
4798
4799 // Restore caller's frame pointer and return address now as they will be
4800 // overwritten by the copying loop.
4801 ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4802 ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4803
4804 // Now copy callee arguments to the caller frame going backwards to avoid
4805 // callee arguments corruption (source and destination areas could overlap).
4806
4807 // Both src_reg and dst_reg are pointing to the word after the one to copy,
4808 // so they must be pre-decremented in the loop.
4809 Register tmp_reg = scratch1;
4810 Label loop, entry;
4811 Branch(&entry);
4812 bind(&loop);
4813 Dsubu(src_reg, src_reg, Operand(kPointerSize));
4814 Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
4815 ld(tmp_reg, MemOperand(src_reg));
4816 sd(tmp_reg, MemOperand(dst_reg));
4817 bind(&entry);
4818 Branch(&loop, ne, sp, Operand(src_reg));
4819
4820 // Leave current frame.
4821 mov(sp, dst_reg);
4822}
4823
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004824void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4825 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004826 Label* done,
4827 bool* definitely_mismatches,
4828 InvokeFlag flag,
4829 const CallWrapper& call_wrapper) {
4830 bool definitely_matches = false;
4831 *definitely_mismatches = false;
4832 Label regular_invoke;
4833
4834 // Check whether the expected and actual arguments count match. If not,
4835  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
4836 // a0: actual arguments count
4837 // a1: function (passed through to callee)
4838 // a2: expected arguments count
4839
4840 // The code below is made a lot easier because the calling code already sets
4841 // up actual and expected registers according to the contract if values are
4842 // passed in registers.
4843 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4844 DCHECK(expected.is_immediate() || expected.reg().is(a2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004845
4846 if (expected.is_immediate()) {
4847 DCHECK(actual.is_immediate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004848 li(a0, Operand(actual.immediate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004849 if (expected.immediate() == actual.immediate()) {
4850 definitely_matches = true;
4851 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004852 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4853 if (expected.immediate() == sentinel) {
4854 // Don't worry about adapting arguments for builtins that
4855 // don't want that done. Skip adaption code by making it look
4856 // like we have a match between expected and actual number of
4857 // arguments.
4858 definitely_matches = true;
4859 } else {
4860 *definitely_mismatches = true;
4861 li(a2, Operand(expected.immediate()));
4862 }
4863 }
4864 } else if (actual.is_immediate()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004865 li(a0, Operand(actual.immediate()));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004866 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004867 } else {
4868 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4869 }
4870
4871 if (!definitely_matches) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004872 Handle<Code> adaptor =
4873 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4874 if (flag == CALL_FUNCTION) {
4875 call_wrapper.BeforeCall(CallSize(adaptor));
4876 Call(adaptor);
4877 call_wrapper.AfterCall();
4878 if (!*definitely_mismatches) {
4879 Branch(done);
4880 }
4881 } else {
4882 Jump(adaptor, RelocInfo::CODE_TARGET);
4883 }
4884 bind(&regular_invoke);
4885 }
4886}
4887
4888
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004889void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4890 const ParameterCount& expected,
4891 const ParameterCount& actual) {
4892 Label skip_flooding;
4893 ExternalReference step_in_enabled =
4894 ExternalReference::debug_step_in_enabled_address(isolate());
4895 li(t0, Operand(step_in_enabled));
4896 lb(t0, MemOperand(t0));
4897 Branch(&skip_flooding, eq, t0, Operand(zero_reg));
4898 {
4899 FrameScope frame(this,
4900 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4901 if (expected.is_reg()) {
4902 SmiTag(expected.reg());
4903 Push(expected.reg());
4904 }
4905 if (actual.is_reg()) {
4906 SmiTag(actual.reg());
4907 Push(actual.reg());
4908 }
4909 if (new_target.is_valid()) {
4910 Push(new_target);
4911 }
4912 Push(fun);
4913 Push(fun);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004914 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004915 Pop(fun);
4916 if (new_target.is_valid()) {
4917 Pop(new_target);
4918 }
4919 if (actual.is_reg()) {
4920 Pop(actual.reg());
4921 SmiUntag(actual.reg());
4922 }
4923 if (expected.is_reg()) {
4924 Pop(expected.reg());
4925 SmiUntag(expected.reg());
4926 }
4927 }
4928 bind(&skip_flooding);
4929}
4930
4931
4932void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4933 const ParameterCount& expected,
4934 const ParameterCount& actual,
4935 InvokeFlag flag,
4936 const CallWrapper& call_wrapper) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004937 // You can't call a function without a valid frame.
4938 DCHECK(flag == JUMP_FUNCTION || has_frame());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004939 DCHECK(function.is(a1));
4940 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
4941
4942 if (call_wrapper.NeedsDebugStepCheck()) {
4943 FloodFunctionIfStepping(function, new_target, expected, actual);
4944 }
4945
4946 // Clear the new.target register if not given.
4947 if (!new_target.is_valid()) {
4948 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
4949 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004950
4951 Label done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004952 bool definitely_mismatches = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004953 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004954 call_wrapper);
4955 if (!definitely_mismatches) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004956 // We call indirectly through the code field in the function to
4957 // allow recompilation to take effect without changing any of the
4958 // call sites.
4959 Register code = t0;
4960 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004961 if (flag == CALL_FUNCTION) {
4962 call_wrapper.BeforeCall(CallSize(code));
4963 Call(code);
4964 call_wrapper.AfterCall();
4965 } else {
4966 DCHECK(flag == JUMP_FUNCTION);
4967 Jump(code);
4968 }
4969 // Continue here if InvokePrologue does handle the invocation due to
4970 // mismatched parameter counts.
4971 bind(&done);
4972 }
4973}
4974
4975
4976void MacroAssembler::InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004977 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004978 const ParameterCount& actual,
4979 InvokeFlag flag,
4980 const CallWrapper& call_wrapper) {
4981 // You can't call a function without a valid frame.
4982 DCHECK(flag == JUMP_FUNCTION || has_frame());
4983
4984 // Contract with called JS functions requires that function is passed in a1.
4985 DCHECK(function.is(a1));
4986 Register expected_reg = a2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004987 Register temp_reg = t0;
4988 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004989 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4990 // The argument count is stored as int32_t on 64-bit platforms.
4991 // TODO(plind): Smi on 32-bit platforms.
4992 lw(expected_reg,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004993 FieldMemOperand(temp_reg,
4994 SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004995 ParameterCount expected(expected_reg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004996 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004997}
4998
4999
5000void MacroAssembler::InvokeFunction(Register function,
5001 const ParameterCount& expected,
5002 const ParameterCount& actual,
5003 InvokeFlag flag,
5004 const CallWrapper& call_wrapper) {
5005 // You can't call a function without a valid frame.
5006 DCHECK(flag == JUMP_FUNCTION || has_frame());
5007
5008 // Contract with called JS functions requires that function is passed in a1.
5009 DCHECK(function.is(a1));
5010
5011 // Get the function and setup the context.
5012 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5013
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005014 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005015}
5016
5017
5018void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
5019 const ParameterCount& expected,
5020 const ParameterCount& actual,
5021 InvokeFlag flag,
5022 const CallWrapper& call_wrapper) {
5023 li(a1, function);
5024 InvokeFunction(a1, expected, actual, flag, call_wrapper);
5025}
5026
5027
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005028void MacroAssembler::IsObjectJSStringType(Register object,
5029 Register scratch,
5030 Label* fail) {
5031 DCHECK(kNotStringTag != 0);
5032
5033 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5034 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5035 And(scratch, scratch, Operand(kIsNotStringMask));
5036 Branch(fail, ne, scratch, Operand(zero_reg));
5037}
5038
5039
5040void MacroAssembler::IsObjectNameType(Register object,
5041 Register scratch,
5042 Label* fail) {
5043 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5044 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5045 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
5046}
5047
5048
5049// ---------------------------------------------------------------------------
5050// Support functions.
5051
5052
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005053void MacroAssembler::GetMapConstructor(Register result, Register map,
5054 Register temp, Register temp2) {
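  // Follow the constructor-or-back-pointer chain: while the field holds a map
  // (a back pointer) keep walking; the first non-map value found is left in
  // result.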
5055 Label done, loop;
5056 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
5057 bind(&loop);
5058 JumpIfSmi(result, &done);
5059 GetObjectType(result, temp, temp2);
5060 Branch(&done, ne, temp2, Operand(MAP_TYPE));
5061 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
5062 Branch(&loop);
5063 bind(&done);
5064}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005065
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005066
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005067void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
5068 Register scratch, Label* miss) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005069 // Get the prototype or initial map from the function.
5070 ld(result,
5071 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5072
5073 // If the prototype or initial map is the hole, don't return it and
5074 // simply miss the cache instead. This will allow us to allocate a
5075 // prototype object on-demand in the runtime system.
5076 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
5077 Branch(miss, eq, result, Operand(t8));
5078
5079 // If the function does not have an initial map, we're done.
5080 Label done;
5081 GetObjectType(result, scratch, scratch);
5082 Branch(&done, ne, scratch, Operand(MAP_TYPE));
5083
5084 // Get the prototype from the initial map.
5085 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
5086
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005087 // All done.
5088 bind(&done);
5089}
5090
5091
5092void MacroAssembler::GetObjectType(Register object,
5093 Register map,
5094 Register type_reg) {
5095 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
5096 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5097}
5098
5099
5100// -----------------------------------------------------------------------------
5101// Runtime calls.
5102
5103void MacroAssembler::CallStub(CodeStub* stub,
5104 TypeFeedbackId ast_id,
5105 Condition cond,
5106 Register r1,
5107 const Operand& r2,
5108 BranchDelaySlot bd) {
5109 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
5110 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
5111 cond, r1, r2, bd);
5112}
5113
5114
5115void MacroAssembler::TailCallStub(CodeStub* stub,
5116 Condition cond,
5117 Register r1,
5118 const Operand& r2,
5119 BranchDelaySlot bd) {
5120 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
5121}
5122
5123
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005124bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
5125 return has_frame_ || !stub->SometimesSetsUpAFrame();
5126}
5127
5128
5129void MacroAssembler::IndexFromHash(Register hash, Register index) {
5130  // If the hash field contains an array index, pick it out. The assert checks
5131  // that the constants for the maximum number of digits for an array index
5132  // cached in the hash field and the number of bits reserved for it do not
5133  // conflict.
5134 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
5135 (1 << String::kArrayIndexValueBits));
5136 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
5137}
5138
5139
5140void MacroAssembler::ObjectToDoubleFPURegister(Register object,
5141 FPURegister result,
5142 Register scratch1,
5143 Register scratch2,
5144 Register heap_number_map,
5145 Label* not_number,
5146 ObjectToDoubleFlags flags) {
5147 Label done;
5148 if ((flags & OBJECT_NOT_SMI) == 0) {
5149 Label not_smi;
5150 JumpIfNotSmi(object, &not_smi);
5151 // Remove smi tag and convert to double.
5152 // dsra(scratch1, object, kSmiTagSize);
5153 dsra32(scratch1, object, 0);
5154 mtc1(scratch1, result);
5155 cvt_d_w(result, result);
5156 Branch(&done);
5157 bind(&not_smi);
5158 }
5159 // Check for heap number and load double value from it.
5160 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
5161 Branch(not_number, ne, scratch1, Operand(heap_number_map));
5162
5163 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
5164 // If exponent is all ones the number is either a NaN or +/-Infinity.
5165 Register exponent = scratch1;
5166 Register mask_reg = scratch2;
5167 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
5168 li(mask_reg, HeapNumber::kExponentMask);
5169
5170 And(exponent, exponent, mask_reg);
5171 Branch(not_number, eq, exponent, Operand(mask_reg));
5172 }
5173 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5174 bind(&done);
5175}
5176
5177
5178void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5179 FPURegister value,
5180 Register scratch1) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005181 dsra32(scratch1, smi, 0);
5182 mtc1(scratch1, value);
5183 cvt_d_w(value, value);
5184}
5185
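// Helper shared by the *BranchOvf macros below: callers leave a value in
// |overflow_dst| whose sign bit is set exactly when the arithmetic overflowed,
// so one signed comparison against zero selects the overflow/no-overflow path.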
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005186static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5187 Label* overflow_label,
5188 Label* no_overflow_label) {
5189 DCHECK(overflow_label || no_overflow_label);
5190 if (!overflow_label) {
5191 DCHECK(no_overflow_label);
5192 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5193 } else {
5194 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5195 if (no_overflow_label) masm->Branch(no_overflow_label);
5196 }
5197}
5198
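// Signed addition overflows exactly when both operands have the same sign and
// the result's sign differs, i.e. (dst ^ left) & (dst ^ right) has its sign
// bit set. For example, 0x7fffffff + 1 gives 0x80000000, and both XOR terms
// then have the sign bit set.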
Ben Murdochda12d292016-06-02 14:46:10 +01005199void MacroAssembler::AddBranchOvf(Register dst, Register left,
5200 const Operand& right, Label* overflow_label,
5201 Label* no_overflow_label, Register scratch) {
5202 if (right.is_reg()) {
5203 AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5204 scratch);
5205 } else {
5206 if (kArchVariant == kMips64r6) {
5207 Register right_reg = t9;
5208 DCHECK(!left.is(right_reg));
5209 li(right_reg, Operand(right));
5210 AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5211 } else {
5212 Register overflow_dst = t9;
5213 DCHECK(!dst.is(scratch));
5214 DCHECK(!dst.is(overflow_dst));
5215 DCHECK(!scratch.is(overflow_dst));
5216 DCHECK(!left.is(overflow_dst));
5217 if (dst.is(left)) {
5218 mov(scratch, left); // Preserve left.
5219 // Left is overwritten.
5220 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5221 xor_(scratch, dst, scratch); // Original left.
5222 // Load right since xori takes uint16 as immediate.
5223 Addu(overflow_dst, zero_reg, right);
5224 xor_(overflow_dst, dst, overflow_dst);
5225 and_(overflow_dst, overflow_dst, scratch);
5226 } else {
5227 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5228 xor_(overflow_dst, dst, left);
5229 // Load right since xori takes uint16 as immediate.
5230 Addu(scratch, zero_reg, right);
5231 xor_(scratch, dst, scratch);
5232 and_(overflow_dst, scratch, overflow_dst);
5233 }
5234 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5235 }
5236 }
5237}
5238
5239void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5240 Label* overflow_label,
5241 Label* no_overflow_label, Register scratch) {
5242 if (kArchVariant == kMips64r6) {
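    // r6 has compact branch-on-overflow instructions: bovc branches when the
    // 32-bit signed addition of its operands overflows, bnvc when it does not,
    // so no scratch arithmetic is needed on this path.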
5243 if (!overflow_label) {
5244 DCHECK(no_overflow_label);
5245 DCHECK(!dst.is(scratch));
5246 Register left_reg = left.is(dst) ? scratch : left;
5247 Register right_reg = right.is(dst) ? t9 : right;
5248 DCHECK(!dst.is(left_reg));
5249 DCHECK(!dst.is(right_reg));
5250 Move(left_reg, left);
5251 Move(right_reg, right);
5252 addu(dst, left, right);
5253 bnvc(left_reg, right_reg, no_overflow_label);
5254 } else {
5255 bovc(left, right, overflow_label);
5256 addu(dst, left, right);
5257 if (no_overflow_label) bc(no_overflow_label);
5258 }
5259 } else {
5260 Register overflow_dst = t9;
5261 DCHECK(!dst.is(scratch));
5262 DCHECK(!dst.is(overflow_dst));
5263 DCHECK(!scratch.is(overflow_dst));
5264 DCHECK(!left.is(overflow_dst));
5265 DCHECK(!right.is(overflow_dst));
5266 DCHECK(!left.is(scratch));
5267 DCHECK(!right.is(scratch));
5268
5269 if (left.is(right) && dst.is(left)) {
5270 mov(overflow_dst, right);
5271 right = overflow_dst;
5272 }
5273
5274 if (dst.is(left)) {
5275 mov(scratch, left); // Preserve left.
5276 addu(dst, left, right); // Left is overwritten.
5277 xor_(scratch, dst, scratch); // Original left.
5278 xor_(overflow_dst, dst, right);
5279 and_(overflow_dst, overflow_dst, scratch);
5280 } else if (dst.is(right)) {
5281 mov(scratch, right); // Preserve right.
5282 addu(dst, left, right); // Right is overwritten.
5283 xor_(scratch, dst, scratch); // Original right.
5284 xor_(overflow_dst, dst, left);
5285 and_(overflow_dst, overflow_dst, scratch);
5286 } else {
5287 addu(dst, left, right);
5288 xor_(overflow_dst, dst, left);
5289 xor_(scratch, dst, right);
5290 and_(overflow_dst, scratch, overflow_dst);
5291 }
5292 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5293 }
5294}
5295
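// Signed subtraction overflows exactly when the operands have different signs
// and the result's sign differs from the left operand's, i.e.
// (left ^ right) & (dst ^ left) has its sign bit set.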
5296void MacroAssembler::SubBranchOvf(Register dst, Register left,
5297 const Operand& right, Label* overflow_label,
5298 Label* no_overflow_label, Register scratch) {
5299 DCHECK(overflow_label || no_overflow_label);
5300 if (right.is_reg()) {
5301 SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5302 scratch);
5303 } else {
5304 Register overflow_dst = t9;
5305 DCHECK(!dst.is(scratch));
5306 DCHECK(!dst.is(overflow_dst));
5307 DCHECK(!scratch.is(overflow_dst));
5308 DCHECK(!left.is(overflow_dst));
5309 DCHECK(!left.is(scratch));
5310 if (dst.is(left)) {
5311 mov(scratch, left); // Preserve left.
5312 // Left is overwritten.
5313 Subu(dst, left, static_cast<int32_t>(right.immediate()));
5314 // Load right since xori takes uint16 as immediate.
5315 Addu(overflow_dst, zero_reg, right);
5316 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5317 xor_(scratch, dst, scratch); // scratch is original left.
5318 and_(overflow_dst, scratch, overflow_dst);
5319 } else {
5320 Subu(dst, left, right);
5321 xor_(overflow_dst, dst, left);
5322 // Load right since xori takes uint16 as immediate.
5323 Addu(scratch, zero_reg, right);
5324 xor_(scratch, left, scratch);
5325 and_(overflow_dst, scratch, overflow_dst);
5326 }
5327 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5328 }
5329}
5330
5331void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5332 Label* overflow_label,
5333 Label* no_overflow_label, Register scratch) {
5334 DCHECK(overflow_label || no_overflow_label);
5335 Register overflow_dst = t9;
5336 DCHECK(!dst.is(scratch));
5337 DCHECK(!dst.is(overflow_dst));
5338 DCHECK(!scratch.is(overflow_dst));
5339 DCHECK(!overflow_dst.is(left));
5340 DCHECK(!overflow_dst.is(right));
5341 DCHECK(!scratch.is(left));
5342 DCHECK(!scratch.is(right));
5343
5344 // This happens with some crankshaft code. Since Subu works fine if
5345 // left == right, let's not make that restriction here.
5346 if (left.is(right)) {
5347 mov(dst, zero_reg);
5348 if (no_overflow_label) {
5349 Branch(no_overflow_label);
5350 }
5351 }
5352
5353 if (dst.is(left)) {
5354 mov(scratch, left); // Preserve left.
5355 subu(dst, left, right); // Left is overwritten.
5356 xor_(overflow_dst, dst, scratch); // scratch is original left.
5357 xor_(scratch, scratch, right); // scratch is original left.
5358 and_(overflow_dst, scratch, overflow_dst);
5359 } else if (dst.is(right)) {
5360 mov(scratch, right); // Preserve right.
5361 subu(dst, left, right); // Right is overwritten.
5362 xor_(overflow_dst, dst, left);
5363 xor_(scratch, left, scratch); // Original right.
5364 and_(overflow_dst, scratch, overflow_dst);
5365 } else {
5366 subu(dst, left, right);
5367 xor_(overflow_dst, dst, left);
5368 xor_(scratch, left, right);
5369 and_(overflow_dst, scratch, overflow_dst);
5370 }
5371 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5372}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005373
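// The Dadd/Dsub variants below apply the same sign-bit overflow test to the
// full 64-bit results produced by daddu/dsubu.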
5374void MacroAssembler::DaddBranchOvf(Register dst, Register left,
5375 const Operand& right, Label* overflow_label,
5376 Label* no_overflow_label, Register scratch) {
5377 if (right.is_reg()) {
5378 DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5379 scratch);
5380 } else {
5381 Register overflow_dst = t9;
5382 DCHECK(!dst.is(scratch));
5383 DCHECK(!dst.is(overflow_dst));
5384 DCHECK(!scratch.is(overflow_dst));
5385 DCHECK(!left.is(overflow_dst));
5386 li(overflow_dst, right); // Load right.
5387 if (dst.is(left)) {
5388 mov(scratch, left); // Preserve left.
5389 Daddu(dst, left, overflow_dst); // Left is overwritten.
5390 xor_(scratch, dst, scratch); // Original left.
5391 xor_(overflow_dst, dst, overflow_dst);
5392 and_(overflow_dst, overflow_dst, scratch);
5393 } else {
5394 Daddu(dst, left, overflow_dst);
5395 xor_(scratch, dst, overflow_dst);
5396 xor_(overflow_dst, dst, left);
5397 and_(overflow_dst, scratch, overflow_dst);
5398 }
5399 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5400 }
5401}
5402
5403
5404void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
5405 Label* overflow_label,
5406 Label* no_overflow_label, Register scratch) {
5407 Register overflow_dst = t9;
5408 DCHECK(!dst.is(scratch));
5409 DCHECK(!dst.is(overflow_dst));
5410 DCHECK(!scratch.is(overflow_dst));
5411 DCHECK(!left.is(overflow_dst));
5412 DCHECK(!right.is(overflow_dst));
5413 DCHECK(!left.is(scratch));
5414 DCHECK(!right.is(scratch));
5415
5416 if (left.is(right) && dst.is(left)) {
5417 mov(overflow_dst, right);
5418 right = overflow_dst;
5419 }
5420
5421 if (dst.is(left)) {
5422 mov(scratch, left); // Preserve left.
5423 daddu(dst, left, right); // Left is overwritten.
5424 xor_(scratch, dst, scratch); // Original left.
5425 xor_(overflow_dst, dst, right);
5426 and_(overflow_dst, overflow_dst, scratch);
5427 } else if (dst.is(right)) {
5428 mov(scratch, right); // Preserve right.
5429 daddu(dst, left, right); // Right is overwritten.
5430 xor_(scratch, dst, scratch); // Original right.
5431 xor_(overflow_dst, dst, left);
5432 and_(overflow_dst, overflow_dst, scratch);
5433 } else {
5434 daddu(dst, left, right);
5435 xor_(overflow_dst, dst, left);
5436 xor_(scratch, dst, right);
5437 and_(overflow_dst, scratch, overflow_dst);
5438 }
5439 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5440}
5441
5442
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005443void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5444 const Operand& right, Label* overflow_label,
5445 Label* no_overflow_label, Register scratch) {
5446 DCHECK(overflow_label || no_overflow_label);
5447 if (right.is_reg()) {
5448 DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5449 scratch);
5450 } else {
5451 Register overflow_dst = t9;
5452 DCHECK(!dst.is(scratch));
5453 DCHECK(!dst.is(overflow_dst));
5454 DCHECK(!scratch.is(overflow_dst));
5455 DCHECK(!left.is(overflow_dst));
5456 DCHECK(!left.is(scratch));
5457 li(overflow_dst, right); // Load right.
5458 if (dst.is(left)) {
5459 mov(scratch, left); // Preserve left.
5460 Dsubu(dst, left, overflow_dst); // Left is overwritten.
5461 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5462 xor_(scratch, dst, scratch); // scratch is original left.
5463 and_(overflow_dst, scratch, overflow_dst);
5464 } else {
5465 Dsubu(dst, left, overflow_dst);
5466 xor_(scratch, left, overflow_dst);
5467 xor_(overflow_dst, dst, left);
5468 and_(overflow_dst, scratch, overflow_dst);
5469 }
5470 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5471 }
5472}
5473
5474
5475void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5476 Label* overflow_label,
5477 Label* no_overflow_label, Register scratch) {
5478 DCHECK(overflow_label || no_overflow_label);
5479 Register overflow_dst = t9;
5480 DCHECK(!dst.is(scratch));
5481 DCHECK(!dst.is(overflow_dst));
5482 DCHECK(!scratch.is(overflow_dst));
5483 DCHECK(!overflow_dst.is(left));
5484 DCHECK(!overflow_dst.is(right));
5485 DCHECK(!scratch.is(left));
5486 DCHECK(!scratch.is(right));
5487
5488  // This happens with some crankshaft code. Since Dsubu works fine if
5489  // left == right, let's not make that restriction here.
5490 if (left.is(right)) {
5491 mov(dst, zero_reg);
5492 if (no_overflow_label) {
5493 Branch(no_overflow_label);
5494 }
5495 }
5496
5497 if (dst.is(left)) {
5498 mov(scratch, left); // Preserve left.
5499 dsubu(dst, left, right); // Left is overwritten.
5500 xor_(overflow_dst, dst, scratch); // scratch is original left.
5501 xor_(scratch, scratch, right); // scratch is original left.
5502 and_(overflow_dst, scratch, overflow_dst);
5503 } else if (dst.is(right)) {
5504 mov(scratch, right); // Preserve right.
5505 dsubu(dst, left, right); // Right is overwritten.
5506 xor_(overflow_dst, dst, left);
5507 xor_(scratch, left, scratch); // Original right.
5508 and_(overflow_dst, scratch, overflow_dst);
5509 } else {
5510 dsubu(dst, left, right);
5511 xor_(overflow_dst, dst, left);
5512 xor_(scratch, left, right);
5513 and_(overflow_dst, scratch, overflow_dst);
5514 }
5515 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5516}
5517
5518
5519void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5520 SaveFPRegsMode save_doubles,
5521 BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005522 // All parameters are on the stack. v0 has the return value after call.
5523
5524 // If the expected number of arguments of the runtime function is
5525 // constant, we check that the actual number of arguments match the
5526 // expectation.
5527 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5528
5529 // TODO(1236192): Most runtime routines don't need the number of
5530 // arguments passed in because it is constant. At some point we
5531 // should remove this need and make the runtime routine entry code
5532 // smarter.
5533 PrepareCEntryArgs(num_arguments);
5534 PrepareCEntryFunction(ExternalReference(f, isolate()));
5535 CEntryStub stub(isolate(), 1, save_doubles);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005536 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005537}
5538
5539
5540void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5541 int num_arguments,
5542 BranchDelaySlot bd) {
5543 PrepareCEntryArgs(num_arguments);
5544 PrepareCEntryFunction(ext);
5545
5546 CEntryStub stub(isolate(), 1);
5547 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5548}
5549
5550
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005551void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5552 const Runtime::Function* function = Runtime::FunctionForId(fid);
5553 DCHECK_EQ(1, function->result_size);
5554 if (function->nargs >= 0) {
5555 PrepareCEntryArgs(function->nargs);
5556 }
5557 JumpToExternalReference(ExternalReference(fid, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005558}
5559
5560
5561void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5562 BranchDelaySlot bd) {
5563 PrepareCEntryFunction(builtin);
5564 CEntryStub stub(isolate(), 1);
5565 Jump(stub.GetCode(),
5566 RelocInfo::CODE_TARGET,
5567 al,
5568 zero_reg,
5569 Operand(zero_reg),
5570 bd);
5571}
5572
5573
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005574void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5575 Register scratch1, Register scratch2) {
5576 if (FLAG_native_code_counters && counter->Enabled()) {
5577 li(scratch1, Operand(value));
5578 li(scratch2, Operand(ExternalReference(counter)));
5579 sd(scratch1, MemOperand(scratch2));
5580 }
5581}
5582
5583
5584void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5585 Register scratch1, Register scratch2) {
5586 DCHECK(value > 0);
5587 if (FLAG_native_code_counters && counter->Enabled()) {
5588 li(scratch2, Operand(ExternalReference(counter)));
5589 ld(scratch1, MemOperand(scratch2));
5590 Daddu(scratch1, scratch1, Operand(value));
5591 sd(scratch1, MemOperand(scratch2));
5592 }
5593}
5594
5595
5596void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5597 Register scratch1, Register scratch2) {
5598 DCHECK(value > 0);
5599 if (FLAG_native_code_counters && counter->Enabled()) {
5600 li(scratch2, Operand(ExternalReference(counter)));
5601 ld(scratch1, MemOperand(scratch2));
5602 Dsubu(scratch1, scratch1, Operand(value));
5603 sd(scratch1, MemOperand(scratch2));
5604 }
5605}
5606
5607
5608// -----------------------------------------------------------------------------
5609// Debugging.
5610
5611void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5612 Register rs, Operand rt) {
5613 if (emit_debug_code())
5614 Check(cc, reason, rs, rt);
5615}
5616
5617
5618void MacroAssembler::AssertFastElements(Register elements) {
5619 if (emit_debug_code()) {
5620 DCHECK(!elements.is(at));
5621 Label ok;
5622 push(elements);
5623 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5624 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5625 Branch(&ok, eq, elements, Operand(at));
5626 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5627 Branch(&ok, eq, elements, Operand(at));
5628 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5629 Branch(&ok, eq, elements, Operand(at));
5630 Abort(kJSObjectWithFastElementsMapHasSlowElements);
5631 bind(&ok);
5632 pop(elements);
5633 }
5634}
5635
5636
5637void MacroAssembler::Check(Condition cc, BailoutReason reason,
5638 Register rs, Operand rt) {
5639 Label L;
5640 Branch(&L, cc, rs, rt);
5641 Abort(reason);
5642 // Will not return here.
5643 bind(&L);
5644}
5645
5646
5647void MacroAssembler::Abort(BailoutReason reason) {
5648 Label abort_start;
5649 bind(&abort_start);
5650#ifdef DEBUG
5651 const char* msg = GetBailoutReason(reason);
5652 if (msg != NULL) {
5653 RecordComment("Abort message: ");
5654 RecordComment(msg);
5655 }
5656
5657 if (FLAG_trap_on_abort) {
5658 stop(msg);
5659 return;
5660 }
5661#endif
5662
5663 li(a0, Operand(Smi::FromInt(reason)));
5664 push(a0);
5665 // Disable stub call restrictions to always allow calls to abort.
5666 if (!has_frame_) {
5667 // We don't actually want to generate a pile of code for this, so just
5668 // claim there is a stack frame, without generating one.
5669 FrameScope scope(this, StackFrame::NONE);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005670 CallRuntime(Runtime::kAbort);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005671 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005672 CallRuntime(Runtime::kAbort);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005673 }
5674 // Will not return here.
5675 if (is_trampoline_pool_blocked()) {
5676 // If the calling code cares about the exact number of
5677 // instructions generated, we insert padding here to keep the size
5678 // of the Abort macro constant.
5679 // Currently in debug mode with debug_code enabled the number of
5680 // generated instructions is 10, so we use this as a maximum value.
5681 static const int kExpectedAbortInstructions = 10;
5682 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5683 DCHECK(abort_instructions <= kExpectedAbortInstructions);
5684 while (abort_instructions++ < kExpectedAbortInstructions) {
5685 nop();
5686 }
5687 }
5688}
5689
5690
5691void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5692 if (context_chain_length > 0) {
5693 // Move up the chain of contexts to the context containing the slot.
5694 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5695 for (int i = 1; i < context_chain_length; i++) {
5696 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5697 }
5698 } else {
5699    // Slot is in the current function context. Move it into the
5700    // destination register in case we store into it (the write barrier
5701    // cannot be allowed to destroy the context in cp).
5702 Move(dst, cp);
5703 }
5704}
5705
5706
5707void MacroAssembler::LoadTransitionedArrayMapConditional(
5708 ElementsKind expected_kind,
5709 ElementsKind transitioned_kind,
5710 Register map_in_out,
5711 Register scratch,
5712 Label* no_map_match) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005713 DCHECK(IsFastElementsKind(expected_kind));
5714 DCHECK(IsFastElementsKind(transitioned_kind));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005715
5716 // Check that the function's map is the same as the expected cached map.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005717 ld(scratch, NativeContextMemOperand());
5718 ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005719 Branch(no_map_match, ne, map_in_out, Operand(at));
5720
5721 // Use the transitioned cached map.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005722 ld(map_in_out,
5723 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005724}
5725
5726
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005727void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5728 ld(dst, NativeContextMemOperand());
5729 ld(dst, ContextMemOperand(dst, index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005730}
5731
5732
5733void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5734 Register map,
5735 Register scratch) {
5736 // Load the initial map. The global functions all have initial maps.
5737 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5738 if (emit_debug_code()) {
5739 Label ok, fail;
5740 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5741 Branch(&ok);
5742 bind(&fail);
5743 Abort(kGlobalFunctionsMustHaveInitialMap);
5744 bind(&ok);
5745 }
5746}
5747
Ben Murdochda12d292016-06-02 14:46:10 +01005748void MacroAssembler::StubPrologue(StackFrame::Type type) {
5749 li(at, Operand(Smi::FromInt(type)));
5750 PushCommonFrame(at);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005751}
5752
5753
5754void MacroAssembler::Prologue(bool code_pre_aging) {
5755 PredictableCodeSizeScope predictible_code_size_scope(
5756 this, kNoCodeAgeSequenceLength);
5757 // The following three instructions must remain together and unmodified
5758 // for code aging to work properly.
5759 if (code_pre_aging) {
5760 // Pre-age the code.
5761 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
5762 nop(Assembler::CODE_AGE_MARKER_NOP);
5763 // Load the stub address to t9 and call it,
5764 // GetCodeAgeAndParity() extracts the stub address from this instruction.
5765 li(t9,
5766 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
5767 ADDRESS_LOAD);
5768 nop(); // Prevent jalr to jal optimization.
5769 jalr(t9, a0);
5770 nop(); // Branch delay slot nop.
5771 nop(); // Pad the empty space.
5772 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01005773 PushStandardFrame(a1);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005774 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5775 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5776 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005777 }
5778}
5779
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005780void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
5781 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5782 ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
5783 ld(vector,
5784 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
5785}
5786
5787
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005788void MacroAssembler::EnterFrame(StackFrame::Type type,
5789 bool load_constant_pool_pointer_reg) {
5790 // Out-of-line constant pool not implemented on mips64.
5791 UNREACHABLE();
5792}
5793
5794
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005795void MacroAssembler::EnterFrame(StackFrame::Type type) {
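  // Resulting layout, from higher to lower addresses: saved ra, saved fp
  // (fp points here), the frame type Smi, and for INTERNAL frames one extra
  // slot holding the code object; sp points at the lowest slot.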
Ben Murdochda12d292016-06-02 14:46:10 +01005796 int stack_offset, fp_offset;
5797 if (type == StackFrame::INTERNAL) {
5798 stack_offset = -4 * kPointerSize;
5799 fp_offset = 2 * kPointerSize;
5800 } else {
5801 stack_offset = -3 * kPointerSize;
5802 fp_offset = 1 * kPointerSize;
5803 }
5804 daddiu(sp, sp, stack_offset);
5805 stack_offset = -stack_offset - kPointerSize;
5806 sd(ra, MemOperand(sp, stack_offset));
5807 stack_offset -= kPointerSize;
5808 sd(fp, MemOperand(sp, stack_offset));
5809 stack_offset -= kPointerSize;
5810 li(t9, Operand(Smi::FromInt(type)));
5811 sd(t9, MemOperand(sp, stack_offset));
5812 if (type == StackFrame::INTERNAL) {
5813 DCHECK_EQ(stack_offset, kPointerSize);
5814 li(t9, Operand(CodeObject()));
5815 sd(t9, MemOperand(sp, 0));
5816 } else {
5817 DCHECK_EQ(stack_offset, 0);
5818 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005819 // Adjust FP to point to saved FP.
Ben Murdochda12d292016-06-02 14:46:10 +01005820 Daddu(fp, sp, Operand(fp_offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005821}
5822
5823
5824void MacroAssembler::LeaveFrame(StackFrame::Type type) {
Ben Murdochda12d292016-06-02 14:46:10 +01005825 daddiu(sp, fp, 2 * kPointerSize);
5826 ld(ra, MemOperand(fp, 1 * kPointerSize));
5827 ld(fp, MemOperand(fp, 0 * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005828}
5829
Ben Murdochda12d292016-06-02 14:46:10 +01005830void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005831 // Set up the frame structure on the stack.
5832 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5833 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5834 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5835
5836 // This is how the stack will look:
5837 // fp + 2 (==kCallerSPDisplacement) - old stack's end
5838 // [fp + 1 (==kCallerPCOffset)] - saved old ra
5839 // [fp + 0 (==kCallerFPOffset)] - saved old fp
Ben Murdochda12d292016-06-02 14:46:10 +01005840  // [fp - 1] - StackFrame::EXIT Smi
5841 // [fp - 2 (==kSPOffset)] - sp of the called function
5842 // [fp - 3 (==kCodeOffset)] - CodeObject
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005843 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5844 // new stack (will contain saved ra)
5845
Ben Murdochda12d292016-06-02 14:46:10 +01005846 // Save registers and reserve room for saved entry sp and code object.
5847 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
5848 sd(ra, MemOperand(sp, 4 * kPointerSize));
5849 sd(fp, MemOperand(sp, 3 * kPointerSize));
5850 li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
5851 sd(at, MemOperand(sp, 2 * kPointerSize));
5852 // Set up new frame pointer.
5853 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005854
5855 if (emit_debug_code()) {
5856 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5857 }
5858
5859 // Accessed from ExitFrame::code_slot.
5860 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5861 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5862
5863 // Save the frame pointer and the context in top.
5864 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5865 sd(fp, MemOperand(t8));
5866 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5867 sd(cp, MemOperand(t8));
5868
5869 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5870 if (save_doubles) {
5871 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
5872 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005873 int space = kNumOfSavedRegisters * kDoubleSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005874 Dsubu(sp, sp, Operand(space));
5875 // Remember: we only need to save every 2nd double FPU value.
5876 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5877 FPURegister reg = FPURegister::from_code(2 * i);
5878 sdc1(reg, MemOperand(sp, i * kDoubleSize));
5879 }
5880 }
5881
5882 // Reserve place for the return address, stack space and an optional slot
5883 // (used by the DirectCEntryStub to hold the return value if a struct is
5884 // returned) and align the frame preparing for calling the runtime function.
5885 DCHECK(stack_space >= 0);
5886 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5887 if (frame_alignment > 0) {
5888 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5889 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5890 }
5891
5892 // Set the exit frame sp value to point just before the return address
5893 // location.
5894 daddiu(at, sp, kPointerSize);
5895 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5896}
5897
5898
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005899void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5900 bool restore_context, bool do_return,
5901 bool argument_count_is_length) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005902 // Optionally restore all double registers.
5903 if (save_doubles) {
5904 // Remember: we only need to restore every 2nd double FPU value.
5905 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
Ben Murdochda12d292016-06-02 14:46:10 +01005906 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
5907 kNumOfSavedRegisters * kDoubleSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005908 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5909 FPURegister reg = FPURegister::from_code(2 * i);
5910 ldc1(reg, MemOperand(t8, i * kDoubleSize));
5911 }
5912 }
5913
5914 // Clear top frame.
5915 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5916 sd(zero_reg, MemOperand(t8));
5917
5918 // Restore current context from top and clear it in debug mode.
5919 if (restore_context) {
5920 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5921 ld(cp, MemOperand(t8));
5922 }
5923#ifdef DEBUG
5924 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5925 sd(a3, MemOperand(t8));
5926#endif
5927
5928 // Pop the arguments, restore registers, and return.
5929 mov(sp, fp); // Respect ABI stack constraint.
5930 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5931 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5932
5933 if (argument_count.is_valid()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005934 if (argument_count_is_length) {
5935 daddu(sp, sp, argument_count);
5936 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005937 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005938 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005939 }
5940
5941 if (do_return) {
5942 Ret(USE_DELAY_SLOT);
5943 // If returning, the instruction in the delay slot will be the addiu below.
5944 }
5945 daddiu(sp, sp, 2 * kPointerSize);
5946}
5947
5948
5949void MacroAssembler::InitializeNewString(Register string,
5950 Register length,
5951 Heap::RootListIndex map_index,
5952 Register scratch1,
5953 Register scratch2) {
5954 // dsll(scratch1, length, kSmiTagSize);
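  // dsll32 with shift 0 shifts left by 32, i.e. Smi-tags the length for the
  // 32-bit Smi representation used on mips64.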
5955 dsll32(scratch1, length, 0);
5956 LoadRoot(scratch2, map_index);
5957 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
5958 li(scratch1, Operand(String::kEmptyHashField));
5959 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005960 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005961}
5962
5963
5964int MacroAssembler::ActivationFrameAlignment() {
5965#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5966 // Running on the real platform. Use the alignment as mandated by the local
5967 // environment.
5968 // Note: This will break if we ever start generating snapshots on one Mips
5969 // platform for another Mips platform with a different alignment.
5970 return base::OS::ActivationFrameAlignment();
5971#else // V8_HOST_ARCH_MIPS
5972 // If we are using the simulator then we should always align to the expected
5973 // alignment. As the simulator is used to generate snapshots we do not know
5974 // if the target platform will need alignment, so this is controlled from a
5975 // flag.
5976 return FLAG_sim_stack_alignment;
5977#endif // V8_HOST_ARCH_MIPS
5978}
5979
5980
5981void MacroAssembler::AssertStackIsAligned() {
5982 if (emit_debug_code()) {
5983 const int frame_alignment = ActivationFrameAlignment();
5984 const int frame_alignment_mask = frame_alignment - 1;
5985
5986 if (frame_alignment > kPointerSize) {
5987 Label alignment_as_expected;
5988 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5989 andi(at, sp, frame_alignment_mask);
5990 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5991      // Don't use Check here, as it will call Runtime_Abort, re-entering here.
5992 stop("Unexpected stack alignment");
5993 bind(&alignment_as_expected);
5994 }
5995 }
5996}
5997
5998
5999void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
6000 Register reg,
6001 Register scratch,
6002 Label* not_power_of_two_or_zero) {
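  // reg is a power of two iff reg > 0 and (reg & (reg - 1)) == 0. The first
  // branch rejects zero and negative values (reg - 1 is then negative); the
  // and/branch pair rejects values with more than one bit set.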
6003 Dsubu(scratch, reg, Operand(1));
6004 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
6005 scratch, Operand(zero_reg));
6006 and_(at, scratch, reg); // In the delay slot.
6007 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
6008}
6009
6010
6011void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
6012 DCHECK(!reg.is(overflow));
6013 mov(overflow, reg); // Save original value.
6014 SmiTag(reg);
6015 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
6016}
6017
6018
6019void MacroAssembler::SmiTagCheckOverflow(Register dst,
6020 Register src,
6021 Register overflow) {
6022 if (dst.is(src)) {
6023 // Fall back to slower case.
6024 SmiTagCheckOverflow(dst, overflow);
6025 } else {
6026 DCHECK(!dst.is(src));
6027 DCHECK(!dst.is(overflow));
6028 DCHECK(!src.is(overflow));
6029 SmiTag(dst, src);
6030 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
6031 }
6032}
6033
6034
6035void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
6036 if (SmiValuesAre32Bits()) {
6037 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6038 } else {
6039 lw(dst, src);
6040 SmiUntag(dst);
6041 }
6042}
6043
6044
6045void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
6046 if (SmiValuesAre32Bits()) {
6047    // TODO(plind): not clear if lw or ld is faster here; needs a micro-benchmark.
6048 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6049 dsll(dst, dst, scale);
6050 } else {
6051 lw(dst, src);
6052 DCHECK(scale >= kSmiTagSize);
6053 sll(dst, dst, scale - kSmiTagSize);
6054 }
6055}
6056
6057
6058// Returns 2 values: the Smi and a scaled version of the int within the Smi.
6059void MacroAssembler::SmiLoadWithScale(Register d_smi,
6060 Register d_scaled,
6061 MemOperand src,
6062 int scale) {
6063 if (SmiValuesAre32Bits()) {
6064 ld(d_smi, src);
6065 dsra(d_scaled, d_smi, kSmiShift - scale);
6066 } else {
6067 lw(d_smi, src);
6068 DCHECK(scale >= kSmiTagSize);
6069 sll(d_scaled, d_smi, scale - kSmiTagSize);
6070 }
6071}
6072
6073
6074// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
6075void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
6076 Register d_scaled,
6077 MemOperand src,
6078 int scale) {
6079 if (SmiValuesAre32Bits()) {
6080 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
6081 dsll(d_scaled, d_int, scale);
6082 } else {
6083 lw(d_int, src);
6084    // Need both the int and the scaled int, so use two instructions.
6085 SmiUntag(d_int);
6086 sll(d_scaled, d_int, scale);
6087 }
6088}
6089
6090
6091void MacroAssembler::UntagAndJumpIfSmi(Register dst,
6092 Register src,
6093 Label* smi_case) {
6094 // DCHECK(!dst.is(src));
6095 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
6096 SmiUntag(dst, src);
6097}
6098
6099
6100void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
6101 Register src,
6102 Label* non_smi_case) {
6103 // DCHECK(!dst.is(src));
6104 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
6105 SmiUntag(dst, src);
6106}
6107
6108void MacroAssembler::JumpIfSmi(Register value,
6109 Label* smi_label,
6110 Register scratch,
6111 BranchDelaySlot bd) {
6112 DCHECK_EQ(0, kSmiTag);
6113 andi(scratch, value, kSmiTagMask);
6114 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
6115}
6116
6117void MacroAssembler::JumpIfNotSmi(Register value,
6118 Label* not_smi_label,
6119 Register scratch,
6120 BranchDelaySlot bd) {
6121 DCHECK_EQ(0, kSmiTag);
6122 andi(scratch, value, kSmiTagMask);
6123 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
6124}
6125
6126
6127void MacroAssembler::JumpIfNotBothSmi(Register reg1,
6128 Register reg2,
6129 Label* on_not_both_smi) {
6130 STATIC_ASSERT(kSmiTag == 0);
6131  // TODO(plind): Find some better way to fix this assert issue.
6132#if defined(__APPLE__)
6133 DCHECK_EQ(1, kSmiTagMask);
6134#else
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006135 DCHECK_EQ((int64_t)1, kSmiTagMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006136#endif
6137 or_(at, reg1, reg2);
6138 JumpIfNotSmi(at, on_not_both_smi);
6139}
6140
6141
6142void MacroAssembler::JumpIfEitherSmi(Register reg1,
6143 Register reg2,
6144 Label* on_either_smi) {
6145 STATIC_ASSERT(kSmiTag == 0);
6146  // TODO(plind): Find some better way to fix this assert issue.
6147#if defined(__APPLE__)
6148 DCHECK_EQ(1, kSmiTagMask);
6149#else
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006150 DCHECK_EQ((int64_t)1, kSmiTagMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006151#endif
6152 // Both Smi tags must be 1 (not Smi).
6153 and_(at, reg1, reg2);
6154 JumpIfSmi(at, on_either_smi);
6155}
6156
Ben Murdochda12d292016-06-02 14:46:10 +01006157void MacroAssembler::AssertNotNumber(Register object) {
6158 if (emit_debug_code()) {
6159 STATIC_ASSERT(kSmiTag == 0);
6160 andi(at, object, kSmiTagMask);
6161 Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6162 GetObjectType(object, t8, t8);
6163 Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6164 }
6165}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006166
6167void MacroAssembler::AssertNotSmi(Register object) {
6168 if (emit_debug_code()) {
6169 STATIC_ASSERT(kSmiTag == 0);
6170 andi(at, object, kSmiTagMask);
6171 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6172 }
6173}
6174
6175
6176void MacroAssembler::AssertSmi(Register object) {
6177 if (emit_debug_code()) {
6178 STATIC_ASSERT(kSmiTag == 0);
6179 andi(at, object, kSmiTagMask);
6180 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6181 }
6182}
6183
6184
6185void MacroAssembler::AssertString(Register object) {
6186 if (emit_debug_code()) {
6187 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006188 SmiTst(object, t8);
6189 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6190 GetObjectType(object, t8, t8);
6191 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006192 }
6193}
6194
6195
6196void MacroAssembler::AssertName(Register object) {
6197 if (emit_debug_code()) {
6198 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006199 SmiTst(object, t8);
6200 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6201 GetObjectType(object, t8, t8);
6202 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6203 }
6204}
6205
6206
6207void MacroAssembler::AssertFunction(Register object) {
6208 if (emit_debug_code()) {
6209 STATIC_ASSERT(kSmiTag == 0);
6210 SmiTst(object, t8);
6211 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6212 GetObjectType(object, t8, t8);
6213 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6214 }
6215}
6216
6217
6218void MacroAssembler::AssertBoundFunction(Register object) {
6219 if (emit_debug_code()) {
6220 STATIC_ASSERT(kSmiTag == 0);
6221 SmiTst(object, t8);
6222 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6223 GetObjectType(object, t8, t8);
6224 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006225 }
6226}
6227
6228
Ben Murdoch097c5b22016-05-18 11:27:45 +01006229void MacroAssembler::AssertReceiver(Register object) {
6230 if (emit_debug_code()) {
6231 STATIC_ASSERT(kSmiTag == 0);
6232 SmiTst(object, t8);
6233 Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6234 GetObjectType(object, t8, t8);
6235 Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6236 }
6237}
6238
6239
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006240void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6241 Register scratch) {
6242 if (emit_debug_code()) {
6243 Label done_checking;
6244 AssertNotSmi(object);
6245 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6246 Branch(&done_checking, eq, object, Operand(scratch));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006247 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006248 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006249 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006250 bind(&done_checking);
6251 }
6252}
6253
6254
6255void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6256 if (emit_debug_code()) {
6257 DCHECK(!reg.is(at));
6258 LoadRoot(at, index);
6259 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6260 }
6261}
6262
6263
6264void MacroAssembler::JumpIfNotHeapNumber(Register object,
6265 Register heap_number_map,
6266 Register scratch,
6267 Label* on_not_heap_number) {
6268 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6269 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6270 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6271}
6272
6273
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006274void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6275 Register first, Register second, Register scratch1, Register scratch2,
6276 Label* failure) {
6277 // Test that both first and second are sequential one-byte strings.
6278 // Assume that they are non-smis.
6279 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6280 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6281 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6282 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6283
6284 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6285 scratch2, failure);
6286}
6287
6288
6289void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6290 Register second,
6291 Register scratch1,
6292 Register scratch2,
6293 Label* failure) {
6294 // Check that neither is a smi.
6295 STATIC_ASSERT(kSmiTag == 0);
6296 And(scratch1, first, Operand(second));
6297 JumpIfSmi(scratch1, failure);
6298 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6299 scratch2, failure);
6300}
6301
6302
6303void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6304 Register first, Register second, Register scratch1, Register scratch2,
6305 Label* failure) {
6306 const int kFlatOneByteStringMask =
6307 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6308 const int kFlatOneByteStringTag =
6309 kStringTag | kOneByteStringTag | kSeqStringTag;
6310 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
6311 andi(scratch1, first, kFlatOneByteStringMask);
6312 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6313 andi(scratch2, second, kFlatOneByteStringMask);
6314 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6315}
6316
6317
6318void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
6319 Register scratch,
6320 Label* failure) {
6321 const int kFlatOneByteStringMask =
6322 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6323 const int kFlatOneByteStringTag =
6324 kStringTag | kOneByteStringTag | kSeqStringTag;
6325 And(scratch, type, Operand(kFlatOneByteStringMask));
6326 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
6327}
6328
Ben Murdoch097c5b22016-05-18 11:27:45 +01006329static const int kRegisterPassedArguments = 8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006330
6331int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6332 int num_double_arguments) {
6333 int stack_passed_words = 0;
6334 num_reg_arguments += 2 * num_double_arguments;
6335
6336 // O32: Up to four simple arguments are passed in registers a0..a3.
6337 // N64: Up to eight simple arguments are passed in registers a0..a7.
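  // For example, 3 integer plus 3 double arguments count as 9 register
  // arguments, so one word spills to the stack, in addition to the
  // kCArgSlotCount reserved O32 argument slots (none under N64).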
6338 if (num_reg_arguments > kRegisterPassedArguments) {
6339 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6340 }
6341 stack_passed_words += kCArgSlotCount;
6342 return stack_passed_words;
6343}
6344
6345
6346void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6347 Register index,
6348 Register value,
6349 Register scratch,
6350 uint32_t encoding_mask) {
6351 Label is_object;
6352 SmiTst(string, at);
6353 Check(ne, kNonObject, at, Operand(zero_reg));
6354
6355 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
6356 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6357
6358 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6359 li(scratch, Operand(encoding_mask));
6360 Check(eq, kUnexpectedStringType, at, Operand(scratch));
6361
6362 // TODO(plind): requires Smi size check code for mips32.
6363
6364 ld(at, FieldMemOperand(string, String::kLengthOffset));
6365 Check(lt, kIndexIsTooLarge, index, Operand(at));
6366
6367 DCHECK(Smi::FromInt(0) == 0);
6368 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6369}
6370
6371
6372void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6373 int num_double_arguments,
6374 Register scratch) {
6375 int frame_alignment = ActivationFrameAlignment();
6376
6377  // N64: Up to eight simple arguments are passed in registers a0..a7; no argument slots.
6378 // O32: Up to four simple arguments are passed in registers a0..a3.
6379 // Those four arguments must have reserved argument slots on the stack for
6380 // mips, even though those argument slots are not normally used.
6381 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6382 // address than) the (O32) argument slots. (arg slot calculation handled by
6383 // CalculateStackPassedWords()).
6384 int stack_passed_arguments = CalculateStackPassedWords(
6385 num_reg_arguments, num_double_arguments);
6386 if (frame_alignment > kPointerSize) {
6387    // Make stack end at alignment and make room for the stack-passed arguments
6388    // and the original value of sp.
6389 mov(scratch, sp);
6390 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6391 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6392 And(sp, sp, Operand(-frame_alignment));
6393 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6394 } else {
6395 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6396 }
6397}
6398
6399
6400void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6401 Register scratch) {
6402 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6403}
6404
6405
6406void MacroAssembler::CallCFunction(ExternalReference function,
6407 int num_reg_arguments,
6408 int num_double_arguments) {
6409 li(t8, Operand(function));
6410 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6411}
6412
6413
6414void MacroAssembler::CallCFunction(Register function,
6415 int num_reg_arguments,
6416 int num_double_arguments) {
6417 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6418}
6419
6420
6421void MacroAssembler::CallCFunction(ExternalReference function,
6422 int num_arguments) {
6423 CallCFunction(function, num_arguments, 0);
6424}
6425
6426
6427void MacroAssembler::CallCFunction(Register function,
6428 int num_arguments) {
6429 CallCFunction(function, num_arguments, 0);
6430}
6431
6432
6433void MacroAssembler::CallCFunctionHelper(Register function,
6434 int num_reg_arguments,
6435 int num_double_arguments) {
6436 DCHECK(has_frame());
6437 // Make sure that the stack is aligned before calling a C function unless
6438 // running in the simulator. The simulator has its own alignment check which
6439 // provides more information.
6440  // The argument slots are presumed to have been set up by
6441  // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
6442
6443#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6444 if (emit_debug_code()) {
6445 int frame_alignment = base::OS::ActivationFrameAlignment();
6446 int frame_alignment_mask = frame_alignment - 1;
6447 if (frame_alignment > kPointerSize) {
6448 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6449 Label alignment_as_expected;
6450 And(at, sp, Operand(frame_alignment_mask));
6451 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6452 // Don't use Check here, as it will call Runtime_Abort possibly
6453 // re-entering here.
6454 stop("Unexpected alignment in CallCFunction");
6455 bind(&alignment_as_expected);
6456 }
6457 }
6458#endif // V8_HOST_ARCH_MIPS
6459
6460 // Just call directly. The function called cannot cause a GC, or
6461 // allow preemption, so the return address in the link register
6462 // stays correct.
6463
6464 if (!function.is(t9)) {
6465 mov(t9, function);
6466 function = t9;
6467 }
6468
6469 Call(function);
6470
6471 int stack_passed_arguments = CalculateStackPassedWords(
6472 num_reg_arguments, num_double_arguments);
6473
6474 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6475 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6476 } else {
6477 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6478 }
6479}
6480
6481
6482#undef BRANCH_ARGS_CHECK
6483
6484
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006485void MacroAssembler::CheckPageFlag(
6486 Register object,
6487 Register scratch,
6488 int mask,
6489 Condition cc,
6490 Label* condition_met) {
6491 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6492 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6493 And(scratch, scratch, Operand(mask));
6494 Branch(condition_met, cc, scratch, Operand(zero_reg));
6495}
6496
6497
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006498void MacroAssembler::JumpIfBlack(Register object,
6499 Register scratch0,
6500 Register scratch1,
6501 Label* on_black) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006502 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
6503 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006504}
6505
6506
6507void MacroAssembler::HasColor(Register object,
6508 Register bitmap_scratch,
6509 Register mask_scratch,
6510 Label* has_color,
6511 int first_bit,
6512 int second_bit) {
6513 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6514 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6515
6516 GetMarkBits(object, bitmap_scratch, mask_scratch);
6517
6518 Label other_color;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006519 // Note that we are using two 4-byte aligned loads.
6520 LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006521 And(t8, t9, Operand(mask_scratch));
6522 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6523 // Shift left 1 by adding.
6524 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
6525 And(t8, t9, Operand(mask_scratch));
6526 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6527
6528 bind(&other_color);
6529}
6530
6531
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006532void MacroAssembler::GetMarkBits(Register addr_reg,
6533 Register bitmap_reg,
6534 Register mask_reg) {
6535 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
6536 // addr_reg is divided into fields:
6537 // |63 page base 20|19 high 8|7 shift 3|2 0|
6538 // 'high' gives the index of the cell holding color bits for the object.
6539 // 'shift' gives the offset in the cell for this object's color.
6540 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6541 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6542 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6543 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
Ben Murdoch097c5b22016-05-18 11:27:45 +01006544 Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006545 li(t8, Operand(1));
6546 dsllv(mask_reg, t8, mask_reg);
6547}
6548
6549
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006550void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6551 Register mask_scratch, Register load_scratch,
6552 Label* value_is_white) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006553 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6554 GetMarkBits(value, bitmap_scratch, mask_scratch);
6555
6556 // If the value is black or grey we don't need to do anything.
6557 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006558 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6559 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006560 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6561
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006562  // Since both black and grey have a 1 in the first position and white does
6563  // not have a 1 there, we only need to check one bit.
6564 // Note that we are using a 4-byte aligned 8-byte load.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006565 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006566 LoadWordPair(load_scratch,
6567 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6568 } else {
6569 lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006570 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006571 And(t8, mask_scratch, load_scratch);
6572 Branch(value_is_white, eq, t8, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006573}
6574
6575
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


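// Extracts the enum-cache length from the map's bit field 3 and returns it
// as a Smi in |dst|. The sentinel value kInvalidEnumCacheSentinel indicates
// that no enum cache has been initialized (see CheckEnumCache below).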
void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


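// Loads the getter or setter stored at |accessor_index| in |holder|'s
// descriptor array: holder map -> instance descriptors -> AccessorPair ->
// getter/setter component.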
void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  ld(dst,
     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  ld(dst, FieldMemOperand(dst, offset));
}


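// Walks the prototype chain of the receiver (expected in a0) and jumps to
// |call_runtime| unless every map has a valid enum cache, every non-receiver
// object has an empty enum cache, and every object has no own elements
// (either the empty fixed array or the empty slow element dictionary).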
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Register null_value = a5;
  Register empty_fixed_array_value = a6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  LoadRoot(null_value, Heap::kNullValueRootIndex);
  jmp(&start);

  bind(&next);
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}


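// Clamps a signed integer in |input_reg| to the range [0, 255] and writes the
// result to |output_reg|: negative values become 0, values above 255 become
// 255. |output_reg| must differ from |input_reg|.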
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero or NaN, return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is above zero. If it exceeds 255, return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}

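// Checks whether an AllocationMemento directly follows the JSArray in
// |receiver_reg|. Falls through when a memento map is found; otherwise jumps
// to |no_memento_found| (object not in new space, the memento would lie past
// the page end or past the allocation top, or the map word does not match).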
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                     Register scratch_reg,
                                                     Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top. Use 64-bit adds and loads: addresses, the
  // allocation top and the map word are full pointers on MIPS64.
  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
  Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
  Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
  Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
  Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
  li(at, Operand(new_space_allocation_top));
  ld(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  // Memento map check.
  bind(&map_check);
  ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
  Branch(no_memento_found, ne, scratch_reg,
         Operand(isolate()->factory()->allocation_memento_map()));
}


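// Returns an allocatable general-purpose register that is not one of the (up
// to six) registers passed in; handy for obtaining an extra scratch register.
// Hits UNREACHABLE() if every allocatable register is excluded.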
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


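// Follows the prototype chain from |object| and jumps to |found| as soon as
// it reaches a map with dictionary (slow-mode) elements, or conservatively
// for instance types below JS_OBJECT_TYPE (e.g. proxies and value wrappers).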
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Start the walk at |object|'s immediate prototype.
  Move(current, object);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&end, eq, current, Operand(factory->null_value()));

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));

  bind(&end);
}


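// Returns true if any two of the valid registers alias, determined by
// comparing the count of valid arguments with the number of distinct bits set
// in the combined RegList.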
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}


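// CodePatcher wraps a small MacroAssembler over existing code so that a few
// instructions can be rewritten in place. A minimal usage sketch (the patch
// site |pc| and the instruction count here are hypothetical):
//
//   CodePatcher patcher(isolate, pc, 1, CodePatcher::FLUSH);
//   patcher.masm()->nop();  // Overwrite one instruction at |pc|.
//
// The destructor flushes the instruction cache when FLUSH is requested and
// verifies that exactly |instructions| instructions were emitted.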
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }
  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  // masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Instr current_instr,
                                        uint32_t new_opcode) {
  current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
  masm_.emit(current_instr);
}


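// Computes the 32-bit quotient dividend / divisor, truncated toward zero,
// without a divide instruction: multiply-high by a precomputed magic number
// (base::SignedDivisionByConstant), apply the correction add/subtract for the
// sign of the divisor, arithmetic-shift, then add the sign bit of the
// dividend so negative results round toward zero.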
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(static_cast<int32_t>(mag.multiplier)));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64