// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_MIPS64

#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      has_double_zero_reg_set_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else if (r.IsInteger32()) {
    lw(dst, src);
  } else {
    ld(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else if (r.IsInteger32()) {
    sw(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    sd(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  sd(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Branch(2, NegateCondition(cond), src1, src2);
  sd(source, MemOperand(s6, index << kPointerSizeLog2));
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sd(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ld(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  DCHECK(cc == eq || cc == ne);
  const int mask =
      1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
  CheckPageFlag(object, scratch, mask, cc, branch);
}


// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Daddu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}


// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    DCHECK(!dst.is(at));
    ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          dst,
          Operand(isolate()->factory()->meta_map()));
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          map,
          Operand(at));
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}


// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    RAStatus ra_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    ld(at, MemOperand(address));
    Assert(
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to update
  // remembered set. If incremental marking is off, there is nothing for us to
  // do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(a1));
  DCHECK(code_entry.is(a4));
  DCHECK(scratch.is(a5));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
    ld(at, MemOperand(scratch));
    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
           Operand(code_entry));
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  const Register dst = scratch;
  Daddu(dst, js_function, Operand(offset - kHeapObjectTag));

  // Save caller-saved registers. js_function and code_entry are in the
  // caller-saved register list.
  DCHECK(kJSCallerSaved & js_function.bit());
  DCHECK(kJSCallerSaved & code_entry.bit());
  MultiPush(kJSCallerSaved | ra.bit());

  int argument_count = 3;

  PrepareCallCFunction(argument_count, code_entry);

  Move(a0, js_function);
  Move(a1, dst);
  li(a2, Operand(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers.
  MultiPop(kJSCallerSaved | ra.bit());

  bind(&done);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  ld(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sd(address, MemOperand(scratch));
  Daddu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sd(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  DCHECK(!scratch.is(t8));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(at));
  DCHECK(!scratch.is(at));

  // Load current lexical context from the stack frame.
  ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
        scratch, Operand(zero_reg));
#endif

  // Load the native context of the current context.
  ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
          holder_reg, Operand(at));

    ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    // Restore at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ld(scratch, FieldMemOperand(scratch, token_offset));
  ld(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  // The algorithm uses 32-bit integer values.
  nor(scratch, reg0, zero_reg);
  Lsa(reg0, scratch, reg0, 15);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  Lsa(reg0, reg0, reg0, 2);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  sll(scratch, reg0, 11);
  Lsa(reg0, reg0, reg0, 3);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
  And(reg0, reg0, Operand(0x3fffffff));
}
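
// For reference, a minimal C++ sketch of the 32-bit hash that the assembly
// above mirrors (an illustration derived from the comments in this function,
// not part of this file; the function name is hypothetical):
//
//   uint32_t ComputeIntegerHashSketch(uint32_t hash, uint32_t seed) {
//     hash ^= seed;
//     hash = ~hash + (hash << 15);  // nor + Lsa above.
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;           // hash + (hash << 3) + (hash << 11).
//     hash = hash ^ (hash >> 16);
//     return hash & 0x3fffffff;
//   }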


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  GetNumberHash(reg0, reg1);

  // Compute the capacity mask.
  ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  SmiUntag(reg1, reg1);
  Dsubu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    Dlsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    Dlsa(reg2, elements, reg2, kPointerSizeLog2);

    ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kNumberDictionaryProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a field property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
  DCHECK_EQ(DATA, 0);
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  ld(result, FieldMemOperand(reg2, kValueOffset));
}


// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    daddu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      daddu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, static_cast<int32_t>(
                        -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dsubu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      daddiu(rd, rs,
             static_cast<int32_t>(
                 -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dsubu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      mult(rs, at);
      mfhi(rd);
    } else {
      muh(rd, rs, at);
    }
  }
}


void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      multu(rs, at);
      mfhi(rd);
    } else {
      muhu(rd, rs, at);
    }
  }
}


void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mflo(rd);
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, at);
    } else {
      dmult(rs, at);
      mflo(rd);
    }
  }
}


void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mfhi(rd);
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, at);
    } else {
      dmult(rs, at);
      mfhi(rd);
    }
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Dmult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmult(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    dmult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmultu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    dmultu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, at);
      mflo(res);
    } else {
      div(res, rs, at);
    }
  }
}


void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, at);
      mfhi(rd);
    } else {
      mod(rd, rs, at);
    }
  }
}


void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, at);
      mfhi(rd);
    } else {
      modu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddiv(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    ddiv(rs, at);
  }
}


void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mflo(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rs, at);
      mflo(rd);
    }
  } else {
    if (rt.is_reg()) {
      ddiv(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rd, rs, at);
    }
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, at);
      mflo(res);
    } else {
      divu(res, rs, at);
    }
  }
}


void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddivu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    ddivu(rs, at);
  }
}


void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      ddivu(rs, rt.rm());
      mflo(res);
    } else {
      ddivu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      ddivu(rs, at);
      mflo(res);
    } else {
      ddivu(res, rs, at);
    }
  }
}


void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rs, at);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmod(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dmod(rd, rs, at);
    }
  }
}


void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddivu(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddivu(rs, at);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmodu(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dmodu(rd, rs, at);
    }
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  DCHECK(rt.is_reg());
  DCHECK(!at.is(rs));
  DCHECK(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    rotrv(rd, rs, rt.rm());
  } else {
    rotr(rd, rs, rt.imm64_);
  }
}


void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    drotrv(rd, rs, rt.rm());
  } else {
    drotr(rd, rs, rt.imm64_);
  }
}


void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  pref(hint, rs);
}


void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                         Register scratch) {
  if (kArchVariant == kMips64r6 && sa <= 4) {
    lsa(rd, rt, rs, sa);
  } else {
    Register tmp = rd.is(rt) ? scratch : rd;
    DCHECK(!tmp.is(rt));
    sll(tmp, rs, sa);
    Addu(rd, rt, tmp);
  }
}


void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
                          Register scratch) {
  if (kArchVariant == kMips64r6 && sa <= 4) {
    dlsa(rd, rt, rs, sa);
  } else {
    Register tmp = rd.is(rt) ? scratch : rd;
    DCHECK(!tmp.is(rt));
    dsll(tmp, rs, sa);
    Daddu(rd, rt, tmp);
  }
}
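
// Both helpers compute rd = rt + (rs << sa): the r6 lsa/dlsa instruction is
// used when available, otherwise the shift-and-add fallback above is emitted.
// A hedged usage sketch (register choices are illustrative only):
//
//   // Address of an 8-byte element: addr = base + (index << 3).
//   // masm->Dlsa(a0, base_reg, index_reg, 3);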


// ------------Pseudo-instructions-------------

void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  lwr(rd, rs);
  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}


void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  swr(rd, rs);
  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}


// Do 64-bit load from unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
  // Assert fail if the offset from start of object IS actually aligned.
  // ONLY use with known misalignment, since there is performance cost.
  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
  if (kArchEndian == kLittle) {
    lwu(rd, rs);
    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
    dsll32(scratch, scratch, 0);
  } else {
    lw(rd, rs);
    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
    dsll32(rd, rd, 0);
  }
  Daddu(rd, rd, scratch);
}
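
// In effect (little-endian case): rd = low32 | (high32 << 32), assembled from
// two 32-bit loads because the address is only guaranteed to be 4-byte
// aligned, not 8-byte aligned.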


// Load a consecutive 32-bit word pair into a 64-bit register, putting the
// first word in the low bits and the second word in the high bits.
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
                                  Register scratch) {
  lwu(rd, rs);
  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
  dsll32(scratch, scratch, 0);
  Daddu(rd, rd, scratch);
}


// Do 64-bit store to unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
  // Assert fail if the offset from start of object IS actually aligned.
  // ONLY use with known misalignment, since there is performance cost.
  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
  if (kArchEndian == kLittle) {
    sw(rd, rs);
    dsrl32(scratch, rd, 0);
    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
  } else {
    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
    dsrl32(scratch, rd, 0);
    sw(scratch, rs);
  }
}

// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
                                   Register scratch) {
  sw(rd, rs);
  dsrl32(scratch, rd, 0);
  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}


void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      li(dst, Operand(cell));
      ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      li(dst, Operand(value));
    }
  }
}

static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
  if ((imm >> (bitnum - 1)) & 0x1) {
    imm = (imm >> bitnum) + 1;
  } else {
    imm = imm >> bitnum;
  }
  return imm;
}
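
// ShiftAndFixSignExtension supports the dahi/dati-based immediate
// materialization in li() below: those instructions add a sign-extended
// 16-bit chunk, so when the top bit of the lower chunk is set the next chunk
// has to be pre-incremented by one to compensate (this rationale is an
// assumption based on the MIPS64r6 dahi/dati semantics). A hedged worked
// example:
//
//   // imm = 0x0000000180000000: bit 31 is set, so
//   // ShiftAndFixSignExtension(imm, 32) == 0x2 rather than 0x1.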

bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
  bool higher_bits_sign_extended = false;
  if (is_int16(j.imm64_)) {
    daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
  } else if (!(j.imm64_ & kHiMask)) {
    ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
  } else if (!(j.imm64_ & kImm16Mask)) {
    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
      higher_bits_sign_extended = true;
    }
  } else {
    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (j.imm64_ & kImm16Mask));
    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
      higher_bits_sign_extended = true;
    }
  }
  return higher_bits_sign_extended;
}

void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int32(j.imm64_)) {
      LiLower32BitHelper(rd, j);
    } else {
      if (kArchVariant == kMips64r6) {
        int64_t imm = j.imm64_;
        bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
        imm = ShiftAndFixSignExtension(imm, 32);
        // If LUI writes 1s to higher bits, we need both DAHI/DATI.
        if ((imm & kImm16Mask) ||
            (higher_bits_sign_extended && (j.imm64_ > 0))) {
          dahi(rd, imm & kImm16Mask);
        }
        imm = ShiftAndFixSignExtension(imm, 16);
        if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
            (higher_bits_sign_extended && (j.imm64_ > 0))) {
          dati(rd, imm & kImm16Mask);
        }
      } else {
        if (is_int48(j.imm64_)) {
          if ((j.imm64_ >> 32) & kImm16Mask) {
            lui(rd, (j.imm64_ >> 32) & kImm16Mask);
            if ((j.imm64_ >> 16) & kImm16Mask) {
              ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
            }
          } else {
            ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
          }
          dsll(rd, rd, 16);
          if (j.imm64_ & kImm16Mask) {
            ori(rd, rd, j.imm64_ & kImm16Mask);
          }
        } else {
          lui(rd, (j.imm64_ >> 48) & kImm16Mask);
          if ((j.imm64_ >> 32) & kImm16Mask) {
            ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
          }
          if ((j.imm64_ >> 16) & kImm16Mask) {
            dsll(rd, rd, 16);
            ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
            if (j.imm64_ & kImm16Mask) {
              dsll(rd, rd, 16);
              ori(rd, rd, j.imm64_ & kImm16Mask);
            } else {
              dsll(rd, rd, 16);
            }
          } else {
            if (j.imm64_ & kImm16Mask) {
              dsll32(rd, rd, 0);
              ori(rd, rd, j.imm64_ & kImm16Mask);
            } else {
              dsll32(rd, rd, 0);
            }
          }
        }
      }
    }
  } else if (MustUseReg(j.rmode_)) {
    RecordRelocInfo(j.rmode_, j.imm64_);
    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  } else if (mode == ADDRESS_LOAD) {
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need all 4 instructions.
    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  } else {
    if (kArchVariant == kMips64r6) {
      int64_t imm = j.imm64_;
      lui(rd, (imm >> kLuiShift) & kImm16Mask);
      if (imm & kImm16Mask) {
        ori(rd, rd, (imm & kImm16Mask));
      }
      if ((imm >> 31) & 0x1) {
        imm = (imm >> 32) + 1;
      } else {
        imm = imm >> 32;
      }
      dahi(rd, imm & kImm16Mask);
      if ((imm >> 15) & 0x1) {
        imm = (imm >> 16) + 1;
      } else {
        imm = imm >> 16;
      }
      dati(rd, imm & kImm16Mask);
    } else {
      lui(rd, (j.imm64_ >> 48) & kImm16Mask);
      ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, j.imm64_ & kImm16Mask);
    }
  }
}


void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);
  ext_(rt, rs, pos, size);
}


void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);
  dext_(rt, rs, pos, size);
}


void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
                           uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(size <= 64);
  dextm(rt, rs, pos, size);
}


void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
                           uint16_t size) {
  DCHECK(pos >= 32 && pos < 64);
  DCHECK(size < 33);
  dextu(rt, rs, pos, size);
}


void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);
  dins_(rt, rs, pos, size);
}


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);
  ins_(rt, rs, pos, size);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));

  // Zero extend int32 in rs.
  Dext(t9, rs, 0, 32);
  dmtc1(t9, fd);
  cvt_d_l(fd, fd);
}


void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  dmfc1(t8, fs);
  Cvt_d_ul(fd, t8);
}


void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.

  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));

  Label msb_clear, conversion_done;

  Branch(&msb_clear, ge, rs, Operand(zero_reg));

  // Rs >= 2^63
  andi(t9, rs, 1);
  dsrl(rs, rs, 1);
  or_(t9, t9, rs);
  dmtc1(t9, fd);
  cvt_d_l(fd, fd);
  Branch(USE_DELAY_SLOT, &conversion_done);
  add_d(fd, fd, fd);  // In delay slot.

  bind(&msb_clear);
  // Rs < 2^63, we can do simple conversion.
  dmtc1(rs, fd);
  cvt_d_l(fd, fd);

  bind(&conversion_done);
}

void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_s_uw(fd, t8);
}

void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));

  // Zero extend int32 in rs.
  Dext(t9, rs, 0, 32);
  dmtc1(t9, fd);
  cvt_s_l(fd, fd);
}

void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  dmfc1(t8, fs);
  Cvt_s_ul(fd, t8);
}


void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.

  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));

  Label positive, conversion_done;

  Branch(&positive, ge, rs, Operand(zero_reg));

  // Rs >= 2^63.
  andi(t9, rs, 1);
  dsrl(rs, rs, 1);
  or_(t9, t9, rs);
  dmtc1(t9, fd);
  cvt_s_l(fd, fd);
  Branch(USE_DELAY_SLOT, &conversion_done);
  add_s(fd, fd, fd);  // In delay slot.

  bind(&positive);
  // Rs < 2^63, we can do simple conversion.
  dmtc1(rs, fd);
  cvt_s_l(fd, fd);

  bind(&conversion_done);
}


void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
  round_l_d(fd, fs);
}


void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
  floor_l_d(fd, fs);
}


void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
  ceil_l_d(fd, fs);
}


void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
  trunc_l_d(fd, fs);
}


void MacroAssembler::Trunc_l_ud(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  // Load to GPR.
  dmfc1(t8, fs);
  // Reset sign bit.
  li(at, 0x7fffffffffffffff);
  and_(t8, t8, at);
  dmtc1(t8, fs);
  trunc_l_d(fd, fs);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}

void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_s(fs, t8, scratch);
  mtc1(t8, fd);
}

void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
                                FPURegister scratch, Register result) {
  Trunc_ul_d(fs, t8, scratch, result);
  dmtc1(t8, fd);
}


void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch, Register result) {
  Trunc_ul_s(fs, t8, scratch, result);
  dmtc1(t8, fd);
}


void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  trunc_w_d(fd, fs);
}


void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  round_w_d(fd, fs);
}


void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  floor_w_d(fd, fs);
}


void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  ceil_w_d(fd, fs);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  mthc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}
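
// The 2^31 trick above, sketched in plain C++ (an assumption-based
// illustration, not part of this file; it presumes the input is in the
// unsigned 32-bit range):
//
//   uint32_t TruncUint32Sketch(double fd) {
//     if (fd < 2147483648.0) {
//       return static_cast<uint32_t>(static_cast<int32_t>(fd));
//     }
//     // Subtract 2^31, truncate, then set bit 31 again.
//     return static_cast<uint32_t>(static_cast<int32_t>(fd - 2147483648.0)) |
//            (1u << 31);
//   }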

void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x4F000000);
  mtc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF32(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_s(scratch, fd, scratch);
  trunc_w_s(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_s(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}

void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
                                FPURegister scratch, Register result) {
  DCHECK(!fd.is(scratch));
  DCHECK(!AreAliased(rs, result, at));

  Label simple_convert, done, fail;
  if (result.is_valid()) {
    mov(result, zero_reg);
    Move(scratch, -1.0);
1914 // If fd <= -1 or unordered, then the conversion fails.
1915 BranchF(&fail, &fail, le, fd, scratch);
1916 }
1917
1918 // Load 2^63 into scratch as its double representation.
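  // 0x43e0000000000000 is the IEEE-754 double-precision bit pattern of 2^63.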
1919 li(at, 0x43e0000000000000);
1920 dmtc1(at, scratch);
1921
1922 // Test if scratch > fd.
1923 // If fd < 2^63 we can convert it normally.
1924 BranchF(&simple_convert, nullptr, lt, fd, scratch);
1925
1926 // First we subtract 2^63 from fd, then trunc it to rs
1927 // and add 2^63 to rs.
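  // Inputs in [2^63, 2^64) do not fit in the signed result of trunc_l_d, so
  // convert (fd - 2^63) instead and OR bit 63 back into the integer result.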
1928 sub_d(scratch, fd, scratch);
1929 trunc_l_d(scratch, scratch);
1930 dmfc1(rs, scratch);
1931 Or(rs, rs, Operand(1UL << 63));
1932 Branch(&done);
1933
1934 // Simple conversion.
1935 bind(&simple_convert);
1936 trunc_l_d(scratch, fd);
1937 dmfc1(rs, scratch);
1938
1939 bind(&done);
1940 if (result.is_valid()) {
1941 // The conversion fails if the result is negative.
1942 addiu(at, zero_reg, -1);
1943 dsrl(at, at, 1); // at = 0x7FFFFFFFFFFFFFFF (2^63 - 1).
1944 dmfc1(result, scratch);
1945 xor_(result, result, at);
1946 Slt(result, zero_reg, result);
1947 }
1948
1949 bind(&fail);
1950}
1951
1952
1953void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
1954 FPURegister scratch, Register result) {
1955 DCHECK(!fd.is(scratch));
1956 DCHECK(!AreAliased(rs, result, at));
1957
1958 Label simple_convert, done, fail;
1959 if (result.is_valid()) {
1960 mov(result, zero_reg);
1961 Move(scratch, -1.0f);
1962 // If fd <= -1 or unordered, then the conversion fails.
1963 BranchF32(&fail, &fail, le, fd, scratch);
1964 }
1965
1966 // Load 2^63 into scratch as its float representation.
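  // 0x5f000000 is the IEEE-754 single-precision bit pattern of 2^63.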
1967 li(at, 0x5f000000);
1968 mtc1(at, scratch);
1969
1970 // Test if scratch > fd.
1971 // If fd < 2^63 we can convert it normally.
1972 BranchF32(&simple_convert, nullptr, lt, fd, scratch);
1973
1974 // First we subtract 2^63 from fd, then trunc it to rs
1975 // and add 2^63 to rs.
1976 sub_s(scratch, fd, scratch);
1977 trunc_l_s(scratch, scratch);
1978 dmfc1(rs, scratch);
1979 Or(rs, rs, Operand(1UL << 63));
1980 Branch(&done);
1981
1982 // Simple conversion.
1983 bind(&simple_convert);
1984 trunc_l_s(scratch, fd);
1985 dmfc1(rs, scratch);
1986
1987 bind(&done);
1988 if (result.is_valid()) {
1989 // The conversion fails if the result is negative or unordered.
1990 addiu(at, zero_reg, -1);
1991 dsrl(at, at, 1); // at = 0x7FFFFFFFFFFFFFFF (2^63 - 1).
1992 dmfc1(result, scratch);
1993 xor_(result, result, at);
1994 Slt(result, zero_reg, result);
1995 }
1996
1997 bind(&fail);
1998}
1999
2000
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002001void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2002 FPURegister ft, FPURegister scratch) {
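  // Both paths compute fd = fr + (fs * ft); the fallback keeps the product in
  // scratch so that fr, fs and ft are left unchanged.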
2003 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
2004 madd_d(fd, fr, fs, ft);
2005 } else {
2006 // Cannot change the source registers' values.
2007 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2008 mul_d(scratch, fs, ft);
2009 add_d(fd, fr, scratch);
2010 }
2011}
2012
2013
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002014void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2015 Label* nan, Condition cond, FPURegister cmp1,
2016 FPURegister cmp2, BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002017 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002018 if (cond == al) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002019 Branch(bd, target);
2020 return;
2021 }
2022
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002023 if (kArchVariant == kMips64r6) {
2024 sizeField = sizeField == D ? L : W;
2025 }
2026
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002027 DCHECK(nan || target);
2028 // Check for unordered (NaN) cases.
2029 if (nan) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002030 bool long_branch = nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002031 if (kArchVariant != kMips64r6) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002032 if (long_branch) {
2033 Label skip;
2034 c(UN, sizeField, cmp1, cmp2);
2035 bc1f(&skip);
2036 nop();
2037 BranchLong(nan, bd);
2038 bind(&skip);
2039 } else {
2040 c(UN, sizeField, cmp1, cmp2);
2041 bc1t(nan);
2042 if (bd == PROTECT) {
2043 nop();
2044 }
2045 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002046 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002047 // Use kDoubleCompareReg for the comparison result. It has to be
2048 // unavailable to the Lithium
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002049 // register allocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002050 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2051 if (long_branch) {
2052 Label skip;
2053 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2054 bc1eqz(&skip, kDoubleCompareReg);
2055 nop();
2056 BranchLong(nan, bd);
2057 bind(&skip);
2058 } else {
2059 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2060 bc1nez(nan, kDoubleCompareReg);
2061 if (bd == PROTECT) {
2062 nop();
2063 }
2064 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002065 }
2066 }
2067
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002068 if (target) {
2069 bool long_branch =
2070 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2071 if (long_branch) {
2072 Label skip;
2073 Condition neg_cond = NegateFpuCondition(cond);
2074 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2075 BranchLong(target, bd);
2076 bind(&skip);
2077 } else {
2078 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2079 }
2080 }
2081}
2082
2083
2084void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2085 Condition cc, FPURegister cmp1,
2086 FPURegister cmp2, BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002087 if (kArchVariant != kMips64r6) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002088 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002089 if (target) {
2090 // Here NaN cases were either handled by this function or are assumed to
2091 // have been handled by the caller.
2092 switch (cc) {
2093 case lt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002094 c(OLT, sizeField, cmp1, cmp2);
2095 bc1t(target);
2096 break;
2097 case ult:
2098 c(ULT, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002099 bc1t(target);
2100 break;
2101 case gt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002102 c(ULE, sizeField, cmp1, cmp2);
2103 bc1f(target);
2104 break;
2105 case ugt:
2106 c(OLE, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002107 bc1f(target);
2108 break;
2109 case ge:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002110 c(ULT, sizeField, cmp1, cmp2);
2111 bc1f(target);
2112 break;
2113 case uge:
2114 c(OLT, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002115 bc1f(target);
2116 break;
2117 case le:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002118 c(OLE, sizeField, cmp1, cmp2);
2119 bc1t(target);
2120 break;
2121 case ule:
2122 c(ULE, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002123 bc1t(target);
2124 break;
2125 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002126 c(EQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002127 bc1t(target);
2128 break;
2129 case ueq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002130 c(UEQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002131 bc1t(target);
2132 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002133 case ne: // Unordered or not equal.
2134 c(EQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002135 bc1f(target);
2136 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002137 case ogl:
2138 c(UEQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002139 bc1f(target);
2140 break;
2141 default:
2142 CHECK(0);
2143 }
2144 }
2145 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002146 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002147 if (target) {
2148 // Here NaN cases were either handled by this function or are assumed to
2149 // have been handled by the caller.
2150 // Unsigned conditions are treated as their signed counterpart.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002151 // Use kDoubleCompareReg for the comparison result; it is valid in
2152 // fp64 (FR = 1) mode.
2153 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002154 switch (cc) {
2155 case lt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002156 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2157 bc1nez(target, kDoubleCompareReg);
2158 break;
2159 case ult:
2160 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2161 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002162 break;
2163 case gt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002164 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2165 bc1eqz(target, kDoubleCompareReg);
2166 break;
2167 case ugt:
2168 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2169 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002170 break;
2171 case ge:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002172 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2173 bc1eqz(target, kDoubleCompareReg);
2174 break;
2175 case uge:
2176 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2177 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002178 break;
2179 case le:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002180 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2181 bc1nez(target, kDoubleCompareReg);
2182 break;
2183 case ule:
2184 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2185 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002186 break;
2187 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002188 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2189 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002190 break;
2191 case ueq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002192 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2193 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002194 break;
2195 case ne:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002196 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2197 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002198 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002199 case ogl:
2200 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2201 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002202 break;
2203 default:
2204 CHECK(0);
2205 }
2206 }
2207 }
2208
2209 if (bd == PROTECT) {
2210 nop();
2211 }
2212}
2213
2214
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002215void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
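  // Only the low 32 bits of dst change: the high word is parked in 'at' via
  // mfhc1 and written back with mthc1 after the mtc1 below.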
2216 DCHECK(!src_low.is(at));
2217 mfhc1(at, dst);
2218 mtc1(src_low, dst);
2219 mthc1(at, dst);
2220}
2221
2222
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002223void MacroAssembler::Move(FPURegister dst, float imm) {
2224 li(at, Operand(bit_cast<int32_t>(imm)));
2225 mtc1(at, dst);
2226}
2227
2228
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002229void MacroAssembler::Move(FPURegister dst, double imm) {
2230 static const DoubleRepresentation minus_zero(-0.0);
2231 static const DoubleRepresentation zero(0.0);
2232 DoubleRepresentation value_rep(imm);
2233 // Handle special values first.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002234 if (value_rep == zero && has_double_zero_reg_set_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002235 mov_d(dst, kDoubleRegZero);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002236 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002237 neg_d(dst, kDoubleRegZero);
2238 } else {
2239 uint32_t lo, hi;
2240 DoubleAsTwoUInt32(imm, &lo, &hi);
2241 // Move the low part of the double into the lower bits of the corresponding
2242 // FPU register.
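  // Materialize 'lo' with the shortest sequence: lui alone when the low
  // halfword is zero, ori alone when the upper halfword is zero, and both
  // otherwise. The same applies to 'hi' below.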
2243 if (lo != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002244 if (!(lo & kImm16Mask)) {
2245 lui(at, (lo >> kLuiShift) & kImm16Mask);
2246 mtc1(at, dst);
2247 } else if (!(lo & kHiMask)) {
2248 ori(at, zero_reg, lo & kImm16Mask);
2249 mtc1(at, dst);
2250 } else {
2251 lui(at, (lo >> kLuiShift) & kImm16Mask);
2252 ori(at, at, lo & kImm16Mask);
2253 mtc1(at, dst);
2254 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002255 } else {
2256 mtc1(zero_reg, dst);
2257 }
2258 // Move the high part of the double into the high bits of the corresponding
2259 // FPU register.
2260 if (hi != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002261 if (!(hi & kImm16Mask)) {
2262 lui(at, (hi >> kLuiShift) & kImm16Mask);
2263 mthc1(at, dst);
2264 } else if (!(hi & kHiMask)) {
2265 ori(at, zero_reg, hi & kImm16Mask);
2266 mthc1(at, dst);
2267 } else {
2268 lui(at, (hi >> kLuiShift) & kImm16Mask);
2269 ori(at, at, hi & kImm16Mask);
2270 mthc1(at, dst);
2271 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002272 } else {
2273 mthc1(zero_reg, dst);
2274 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002275 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002276 }
2277}
2278
2279
2280void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2281 if (kArchVariant == kMips64r6) {
2282 Label done;
2283 Branch(&done, ne, rt, Operand(zero_reg));
2284 mov(rd, rs);
2285 bind(&done);
2286 } else {
2287 movz(rd, rs, rt);
2288 }
2289}
2290
2291
2292void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2293 if (kArchVariant == kMips64r6) {
2294 Label done;
2295 Branch(&done, eq, rt, Operand(zero_reg));
2296 mov(rd, rs);
2297 bind(&done);
2298 } else {
2299 movn(rd, rs, rt);
2300 }
2301}
2302
2303
2304void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2305 movt(rd, rs, cc);
2306}
2307
2308
2309void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2310 movf(rd, rs, cc);
2311}
2312
2313
2314void MacroAssembler::Clz(Register rd, Register rs) {
2315 clz(rd, rs);
2316}
2317
2318
2319void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2320 Register result,
2321 DoubleRegister double_input,
2322 Register scratch,
2323 DoubleRegister double_scratch,
2324 Register except_flag,
2325 CheckForInexactConversion check_inexact) {
2326 DCHECK(!result.is(scratch));
2327 DCHECK(!double_input.is(double_scratch));
2328 DCHECK(!except_flag.is(scratch));
2329
2330 Label done;
2331
2332 // Clear the except flag (0 = no exception)
2333 mov(except_flag, zero_reg);
2334
2335 // Test for values that can be exactly represented as a signed 32-bit integer.
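  // cvt_w_d followed by cvt_d_w round-trips to the original value only when
  // the input is integral and in int32 range; in that case 'result' already
  // holds the exact answer and the FCSR flags do not need to be inspected.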
2336 cvt_w_d(double_scratch, double_input);
2337 mfc1(result, double_scratch);
2338 cvt_d_w(double_scratch, double_scratch);
2339 BranchF(&done, NULL, eq, double_input, double_scratch);
2340
2341 int32_t except_mask = kFCSRFlagMask; // Assume we are interested in all exceptions.
2342
2343 if (check_inexact == kDontCheckForInexactConversion) {
2344 // Ignore inexact exceptions.
2345 except_mask &= ~kFCSRInexactFlagMask;
2346 }
2347
2348 // Save FCSR.
2349 cfc1(scratch, FCSR);
2350 // Disable FPU exceptions.
2351 ctc1(zero_reg, FCSR);
2352
2353 // Do operation based on rounding mode.
2354 switch (rounding_mode) {
2355 case kRoundToNearest:
2356 Round_w_d(double_scratch, double_input);
2357 break;
2358 case kRoundToZero:
2359 Trunc_w_d(double_scratch, double_input);
2360 break;
2361 case kRoundToPlusInf:
2362 Ceil_w_d(double_scratch, double_input);
2363 break;
2364 case kRoundToMinusInf:
2365 Floor_w_d(double_scratch, double_input);
2366 break;
2367 } // End of switch-statement.
2368
2369 // Retrieve FCSR.
2370 cfc1(except_flag, FCSR);
2371 // Restore FCSR.
2372 ctc1(scratch, FCSR);
2373 // Move the converted value into the result register.
2374 mfc1(result, double_scratch);
2375
2376 // Check for fpu exceptions.
2377 And(except_flag, except_flag, Operand(except_mask));
2378
2379 bind(&done);
2380}
2381
2382
2383void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2384 DoubleRegister double_input,
2385 Label* done) {
2386 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2387 Register scratch = at;
2388 Register scratch2 = t9;
2389
2390 // Clear cumulative exception flags and save the FCSR.
2391 cfc1(scratch2, FCSR);
2392 ctc1(zero_reg, FCSR);
2393 // Try a conversion to a signed integer.
2394 trunc_w_d(single_scratch, double_input);
2395 mfc1(result, single_scratch);
2396 // Retrieve and restore the FCSR.
2397 cfc1(scratch, FCSR);
2398 ctc1(scratch2, FCSR);
2399 // Check for overflow and NaNs.
2400 And(scratch,
2401 scratch,
2402 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2403 // If we had no exceptions we are done.
2404 Branch(done, eq, scratch, Operand(zero_reg));
2405}
2406
2407
2408void MacroAssembler::TruncateDoubleToI(Register result,
2409 DoubleRegister double_input) {
2410 Label done;
2411
2412 TryInlineTruncateDoubleToI(result, double_input, &done);
2413
2414 // If we fell through then the inline version didn't succeed - call stub instead.
2415 push(ra);
2416 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2417 sdc1(double_input, MemOperand(sp, 0));
2418
2419 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2420 CallStub(&stub);
2421
2422 Daddu(sp, sp, Operand(kDoubleSize));
2423 pop(ra);
2424
2425 bind(&done);
2426}
2427
2428
2429void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2430 Label done;
2431 DoubleRegister double_scratch = f12;
2432 DCHECK(!result.is(object));
2433
2434 ldc1(double_scratch,
2435 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2436 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2437
2438 // If we fell through then the inline version didn't succeed - call stub instead.
2439 push(ra);
2440 DoubleToIStub stub(isolate(),
2441 object,
2442 result,
2443 HeapNumber::kValueOffset - kHeapObjectTag,
2444 true,
2445 true);
2446 CallStub(&stub);
2447 pop(ra);
2448
2449 bind(&done);
2450}
2451
2452
2453void MacroAssembler::TruncateNumberToI(Register object,
2454 Register result,
2455 Register heap_number_map,
2456 Register scratch,
2457 Label* not_number) {
2458 Label done;
2459 DCHECK(!result.is(object));
2460
2461 UntagAndJumpIfSmi(result, object, &done);
2462 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2463 TruncateHeapNumberToI(result, object);
2464
2465 bind(&done);
2466}
2467
2468
2469void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2470 Register src,
2471 int num_least_bits) {
2472 // Ext(dst, src, kSmiTagSize, num_least_bits);
2473 SmiUntag(dst, src);
2474 And(dst, dst, Operand((1 << num_least_bits) - 1));
2475}
2476
2477
2478void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2479 Register src,
2480 int num_least_bits) {
2481 DCHECK(!src.is(dst));
2482 And(dst, src, Operand((1 << num_least_bits) - 1));
2483}
2484
2485
2486// Emulated condtional branches do not emit a nop in the branch delay slot.
2487//
2488// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2489#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2490 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2491 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2492
2493
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002494void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2495 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002496 BranchShort(offset, bdslot);
2497}
2498
2499
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002500void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2501 const Operand& rt, BranchDelaySlot bdslot) {
2502 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2503 DCHECK(is_near);
2504 USE(is_near);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002505}
2506
2507
2508void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2509 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002510 if (is_near_branch(L)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002511 BranchShort(L, bdslot);
2512 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002513 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002514 }
2515 } else {
2516 if (is_trampoline_emitted()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002517 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002518 } else {
2519 BranchShort(L, bdslot);
2520 }
2521 }
2522}
2523
2524
2525void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2526 const Operand& rt,
2527 BranchDelaySlot bdslot) {
2528 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002529 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002530 if (cond != cc_always) {
2531 Label skip;
2532 Condition neg_cond = NegateCondition(cond);
2533 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002534 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002535 bind(&skip);
2536 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002537 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002538 }
2539 }
2540 } else {
2541 if (is_trampoline_emitted()) {
2542 if (cond != cc_always) {
2543 Label skip;
2544 Condition neg_cond = NegateCondition(cond);
2545 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002546 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002547 bind(&skip);
2548 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002549 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002550 }
2551 } else {
2552 BranchShort(L, cond, rs, rt, bdslot);
2553 }
2554 }
2555}
2556
2557
2558void MacroAssembler::Branch(Label* L,
2559 Condition cond,
2560 Register rs,
2561 Heap::RootListIndex index,
2562 BranchDelaySlot bdslot) {
2563 LoadRoot(at, index);
2564 Branch(L, cond, rs, Operand(at), bdslot);
2565}
2566
2567
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002568void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2569 BranchDelaySlot bdslot) {
2570 DCHECK(L == nullptr || offset == 0);
2571 offset = GetOffset(offset, L, OffsetSize::kOffset16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002572 b(offset);
2573
2574 // Emit a nop in the branch delay slot if required.
2575 if (bdslot == PROTECT)
2576 nop();
2577}
2578
2579
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002580void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2581 DCHECK(L == nullptr || offset == 0);
2582 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2583 bc(offset);
2584}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002585
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002586
2587void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2588 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2589 DCHECK(is_int26(offset));
2590 BranchShortHelperR6(offset, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002591 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002592 DCHECK(is_int16(offset));
2593 BranchShortHelper(offset, nullptr, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002594 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002595}
2596
2597
2598void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002599 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2600 BranchShortHelperR6(0, L);
2601 } else {
2602 BranchShortHelper(0, L, bdslot);
2603 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002604}
2605
2606
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002607static inline bool IsZero(const Operand& rt) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002608 if (rt.is_reg()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002609 return rt.rm().is(zero_reg);
2610 } else {
2611 return rt.immediate() == 0;
2612 }
2613}
2614
2615
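// Returns the branch offset in instructions: taken from the label when one is
// given (byte offset >> 2), otherwise the caller-supplied offset is used after
// a range check against 'bits'.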
2616int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2617 if (L) {
2618 offset = branch_offset_helper(L, bits) >> 2;
2619 } else {
2620 DCHECK(is_intn(offset, bits));
2621 }
2622 return offset;
2623}
2624
2625
2626Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2627 Register scratch) {
2628 Register r2 = no_reg;
2629 if (rt.is_reg()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002630 r2 = rt.rm_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002631 } else {
2632 r2 = scratch;
2633 li(r2, rt);
2634 }
2635
2636 return r2;
2637}
2638
2639
2640bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2641 Condition cond, Register rs,
2642 const Operand& rt) {
2643 DCHECK(L == nullptr || offset == 0);
2644 Register scratch = rs.is(at) ? t8 : at;
2645 OffsetSize bits = OffsetSize::kOffset16;
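  // The usable offset width depends on the instruction chosen below: 16 bits
  // for the compare-two-registers forms (beqc, bltc, ...), 21 bits for
  // beqzc/bnezc, and 26 bits for the unconditional bc.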
2646
2647 // Be careful to always use shifted_branch_offset only just before the
2648 // branch instruction, as the location will be remembered for patching the
2649 // target.
2650 {
2651 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002652 switch (cond) {
2653 case cc_always:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002654 bits = OffsetSize::kOffset26;
2655 if (!is_near(L, bits)) return false;
2656 offset = GetOffset(offset, L, bits);
2657 bc(offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002658 break;
2659 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002660 if (rs.code() == rt.rm_.reg_code) {
2661 // Pre R6 beq is used here to make the code patchable. Otherwise bc
2662 // should be used, which has no condition field and so is not patchable.
2663 bits = OffsetSize::kOffset16;
2664 if (!is_near(L, bits)) return false;
2665 scratch = GetRtAsRegisterHelper(rt, scratch);
2666 offset = GetOffset(offset, L, bits);
2667 beq(rs, scratch, offset);
2668 nop();
2669 } else if (IsZero(rt)) {
2670 bits = OffsetSize::kOffset21;
2671 if (!is_near(L, bits)) return false;
2672 offset = GetOffset(offset, L, bits);
2673 beqzc(rs, offset);
2674 } else {
2675 // We don't want any other register but scratch clobbered.
2676 bits = OffsetSize::kOffset16;
2677 if (!is_near(L, bits)) return false;
2678 scratch = GetRtAsRegisterHelper(rt, scratch);
2679 offset = GetOffset(offset, L, bits);
2680 beqc(rs, scratch, offset);
2681 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002682 break;
2683 case ne:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002684 if (rs.code() == rt.rm_.reg_code) {
2685 // Pre R6 bne is used here to make the code patchable. Otherwise we
2686 // should not generate any instruction.
2687 bits = OffsetSize::kOffset16;
2688 if (!is_near(L, bits)) return false;
2689 scratch = GetRtAsRegisterHelper(rt, scratch);
2690 offset = GetOffset(offset, L, bits);
2691 bne(rs, scratch, offset);
2692 nop();
2693 } else if (IsZero(rt)) {
2694 bits = OffsetSize::kOffset21;
2695 if (!is_near(L, bits)) return false;
2696 offset = GetOffset(offset, L, bits);
2697 bnezc(rs, offset);
2698 } else {
2699 // We don't want any other register but scratch clobbered.
2700 bits = OffsetSize::kOffset16;
2701 if (!is_near(L, bits)) return false;
2702 scratch = GetRtAsRegisterHelper(rt, scratch);
2703 offset = GetOffset(offset, L, bits);
2704 bnec(rs, scratch, offset);
2705 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002706 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002707
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002708 // Signed comparison.
2709 case greater:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002710 // rs > rt
2711 if (rs.code() == rt.rm_.reg_code) {
2712 break; // No code needs to be emitted.
2713 } else if (rs.is(zero_reg)) {
2714 bits = OffsetSize::kOffset16;
2715 if (!is_near(L, bits)) return false;
2716 scratch = GetRtAsRegisterHelper(rt, scratch);
2717 offset = GetOffset(offset, L, bits);
2718 bltzc(scratch, offset);
2719 } else if (IsZero(rt)) {
2720 bits = OffsetSize::kOffset16;
2721 if (!is_near(L, bits)) return false;
2722 offset = GetOffset(offset, L, bits);
2723 bgtzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002724 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002725 bits = OffsetSize::kOffset16;
2726 if (!is_near(L, bits)) return false;
2727 scratch = GetRtAsRegisterHelper(rt, scratch);
2728 DCHECK(!rs.is(scratch));
2729 offset = GetOffset(offset, L, bits);
2730 bltc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002731 }
2732 break;
2733 case greater_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002734 // rs >= rt
2735 if (rs.code() == rt.rm_.reg_code) {
2736 bits = OffsetSize::kOffset26;
2737 if (!is_near(L, bits)) return false;
2738 offset = GetOffset(offset, L, bits);
2739 bc(offset);
2740 } else if (rs.is(zero_reg)) {
2741 bits = OffsetSize::kOffset16;
2742 if (!is_near(L, bits)) return false;
2743 scratch = GetRtAsRegisterHelper(rt, scratch);
2744 offset = GetOffset(offset, L, bits);
2745 blezc(scratch, offset);
2746 } else if (IsZero(rt)) {
2747 bits = OffsetSize::kOffset16;
2748 if (!is_near(L, bits)) return false;
2749 offset = GetOffset(offset, L, bits);
2750 bgezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002751 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002752 bits = OffsetSize::kOffset16;
2753 if (!is_near(L, bits)) return false;
2754 scratch = GetRtAsRegisterHelper(rt, scratch);
2755 DCHECK(!rs.is(scratch));
2756 offset = GetOffset(offset, L, bits);
2757 bgec(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002758 }
2759 break;
2760 case less:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002761 // rs < rt
2762 if (rs.code() == rt.rm_.reg_code) {
2763 break; // No code needs to be emitted.
2764 } else if (rs.is(zero_reg)) {
2765 bits = OffsetSize::kOffset16;
2766 if (!is_near(L, bits)) return false;
2767 scratch = GetRtAsRegisterHelper(rt, scratch);
2768 offset = GetOffset(offset, L, bits);
2769 bgtzc(scratch, offset);
2770 } else if (IsZero(rt)) {
2771 bits = OffsetSize::kOffset16;
2772 if (!is_near(L, bits)) return false;
2773 offset = GetOffset(offset, L, bits);
2774 bltzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002775 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002776 bits = OffsetSize::kOffset16;
2777 if (!is_near(L, bits)) return false;
2778 scratch = GetRtAsRegisterHelper(rt, scratch);
2779 DCHECK(!rs.is(scratch));
2780 offset = GetOffset(offset, L, bits);
2781 bltc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002782 }
2783 break;
2784 case less_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002785 // rs <= rt
2786 if (rs.code() == rt.rm_.reg_code) {
2787 bits = OffsetSize::kOffset26;
2788 if (!is_near(L, bits)) return false;
2789 offset = GetOffset(offset, L, bits);
2790 bc(offset);
2791 } else if (rs.is(zero_reg)) {
2792 bits = OffsetSize::kOffset16;
2793 if (!is_near(L, bits)) return false;
2794 scratch = GetRtAsRegisterHelper(rt, scratch);
2795 offset = GetOffset(offset, L, bits);
2796 bgezc(scratch, offset);
2797 } else if (IsZero(rt)) {
2798 bits = OffsetSize::kOffset16;
2799 if (!is_near(L, bits)) return false;
2800 offset = GetOffset(offset, L, bits);
2801 blezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002802 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002803 bits = OffsetSize::kOffset16;
2804 if (!is_near(L, bits)) return false;
2805 scratch = GetRtAsRegisterHelper(rt, scratch);
2806 DCHECK(!rs.is(scratch));
2807 offset = GetOffset(offset, L, bits);
2808 bgec(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002809 }
2810 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002811
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002812 // Unsigned comparison.
2813 case Ugreater:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002814 // rs > rt
2815 if (rs.code() == rt.rm_.reg_code) {
2816 break; // No code needs to be emitted.
2817 } else if (rs.is(zero_reg)) {
2818 bits = OffsetSize::kOffset21;
2819 if (!is_near(L, bits)) return false;
2820 scratch = GetRtAsRegisterHelper(rt, scratch);
2821 offset = GetOffset(offset, L, bits);
2822 bnezc(scratch, offset);
2823 } else if (IsZero(rt)) {
2824 bits = OffsetSize::kOffset21;
2825 if (!is_near(L, bits)) return false;
2826 offset = GetOffset(offset, L, bits);
2827 bnezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002828 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002829 bits = OffsetSize::kOffset16;
2830 if (!is_near(L, bits)) return false;
2831 scratch = GetRtAsRegisterHelper(rt, scratch);
2832 DCHECK(!rs.is(scratch));
2833 offset = GetOffset(offset, L, bits);
2834 bltuc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002835 }
2836 break;
2837 case Ugreater_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002838 // rs >= rt
2839 if (rs.code() == rt.rm_.reg_code) {
2840 bits = OffsetSize::kOffset26;
2841 if (!is_near(L, bits)) return false;
2842 offset = GetOffset(offset, L, bits);
2843 bc(offset);
2844 } else if (rs.is(zero_reg)) {
2845 bits = OffsetSize::kOffset21;
2846 if (!is_near(L, bits)) return false;
2847 scratch = GetRtAsRegisterHelper(rt, scratch);
2848 offset = GetOffset(offset, L, bits);
2849 beqzc(scratch, offset);
2850 } else if (IsZero(rt)) {
2851 bits = OffsetSize::kOffset26;
2852 if (!is_near(L, bits)) return false;
2853 offset = GetOffset(offset, L, bits);
2854 bc(offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002855 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002856 bits = OffsetSize::kOffset16;
2857 if (!is_near(L, bits)) return false;
2858 scratch = GetRtAsRegisterHelper(rt, scratch);
2859 DCHECK(!rs.is(scratch));
2860 offset = GetOffset(offset, L, bits);
2861 bgeuc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002862 }
2863 break;
2864 case Uless:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002865 // rs < rt
2866 if (rs.code() == rt.rm_.reg_code) {
2867 break; // No code needs to be emitted.
2868 } else if (rs.is(zero_reg)) {
2869 bits = OffsetSize::kOffset21;
2870 if (!is_near(L, bits)) return false;
2871 scratch = GetRtAsRegisterHelper(rt, scratch);
2872 offset = GetOffset(offset, L, bits);
2873 bnezc(scratch, offset);
2874 } else if (IsZero(rt)) {
2875 break; // No code needs to be emitted.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002876 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002877 bits = OffsetSize::kOffset16;
2878 if (!is_near(L, bits)) return false;
2879 scratch = GetRtAsRegisterHelper(rt, scratch);
2880 DCHECK(!rs.is(scratch));
2881 offset = GetOffset(offset, L, bits);
2882 bltuc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002883 }
2884 break;
2885 case Uless_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002886 // rs <= rt
2887 if (rs.code() == rt.rm_.reg_code) {
2888 bits = OffsetSize::kOffset26;
2889 if (!is_near(L, bits)) return false;
2890 offset = GetOffset(offset, L, bits);
2891 bc(offset);
2892 } else if (rs.is(zero_reg)) {
2893 bits = OffsetSize::kOffset26;
2894 if (!is_near(L, bits)) return false;
2895 scratch = GetRtAsRegisterHelper(rt, scratch);
2896 offset = GetOffset(offset, L, bits);
2897 bc(offset);
2898 } else if (IsZero(rt)) {
2899 bits = OffsetSize::kOffset21;
2900 if (!is_near(L, bits)) return false;
2901 offset = GetOffset(offset, L, bits);
2902 beqzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002903 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002904 bits = OffsetSize::kOffset16;
2905 if (!is_near(L, bits)) return false;
2906 scratch = GetRtAsRegisterHelper(rt, scratch);
2907 DCHECK(!rs.is(scratch));
2908 offset = GetOffset(offset, L, bits);
2909 bgeuc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002910 }
2911 break;
2912 default:
2913 UNREACHABLE();
2914 }
2915 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002916 CheckTrampolinePoolQuick(1);
2917 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002918}
2919
2920
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002921bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
2922 Register rs, const Operand& rt,
2923 BranchDelaySlot bdslot) {
2924 DCHECK(L == nullptr || offset == 0);
2925 if (!is_near(L, OffsetSize::kOffset16)) return false;
2926
2927 Register scratch = at;
2928 int32_t offset32;
2929
2930 // Be careful to always use shifted_branch_offset only just before the
2931 // branch instruction, as the location will be remembered for patching the
2932 // target.
2933 {
2934 BlockTrampolinePoolScope block_trampoline_pool(this);
2935 switch (cond) {
2936 case cc_always:
2937 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2938 b(offset32);
2939 break;
2940 case eq:
2941 if (IsZero(rt)) {
2942 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2943 beq(rs, zero_reg, offset32);
2944 } else {
2945 // We don't want any other register but scratch clobbered.
2946 scratch = GetRtAsRegisterHelper(rt, scratch);
2947 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2948 beq(rs, scratch, offset32);
2949 }
2950 break;
2951 case ne:
2952 if (IsZero(rt)) {
2953 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2954 bne(rs, zero_reg, offset32);
2955 } else {
2956 // We don't want any other register but scratch clobbered.
2957 scratch = GetRtAsRegisterHelper(rt, scratch);
2958 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2959 bne(rs, scratch, offset32);
2960 }
2961 break;
2962
2963 // Signed comparison.
2964 case greater:
2965 if (IsZero(rt)) {
2966 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2967 bgtz(rs, offset32);
2968 } else {
2969 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2970 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2971 bne(scratch, zero_reg, offset32);
2972 }
2973 break;
2974 case greater_equal:
2975 if (IsZero(rt)) {
2976 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2977 bgez(rs, offset32);
2978 } else {
2979 Slt(scratch, rs, rt);
2980 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2981 beq(scratch, zero_reg, offset32);
2982 }
2983 break;
2984 case less:
2985 if (IsZero(rt)) {
2986 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2987 bltz(rs, offset32);
2988 } else {
2989 Slt(scratch, rs, rt);
2990 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2991 bne(scratch, zero_reg, offset32);
2992 }
2993 break;
2994 case less_equal:
2995 if (IsZero(rt)) {
2996 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2997 blez(rs, offset32);
2998 } else {
2999 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3000 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3001 beq(scratch, zero_reg, offset32);
3002 }
3003 break;
3004
3005 // Unsigned comparison.
3006 case Ugreater:
3007 if (IsZero(rt)) {
3008 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3009 bne(rs, zero_reg, offset32);
3010 } else {
3011 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3012 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3013 bne(scratch, zero_reg, offset32);
3014 }
3015 break;
3016 case Ugreater_equal:
3017 if (IsZero(rt)) {
3018 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3019 b(offset32);
3020 } else {
3021 Sltu(scratch, rs, rt);
3022 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3023 beq(scratch, zero_reg, offset32);
3024 }
3025 break;
3026 case Uless:
3027 if (IsZero(rt)) {
3028 return true; // No code needs to be emitted.
3029 } else {
3030 Sltu(scratch, rs, rt);
3031 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3032 bne(scratch, zero_reg, offset32);
3033 }
3034 break;
3035 case Uless_equal:
3036 if (IsZero(rt)) {
3037 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3038 beq(rs, zero_reg, offset32);
3039 } else {
3040 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3041 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3042 beq(scratch, zero_reg, offset32);
3043 }
3044 break;
3045 default:
3046 UNREACHABLE();
3047 }
3048 }
3049
3050 // Emit a nop in the branch delay slot if required.
3051 if (bdslot == PROTECT)
3052 nop();
3053
3054 return true;
3055}
3056
3057
3058bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3059 Register rs, const Operand& rt,
3060 BranchDelaySlot bdslot) {
3061 BRANCH_ARGS_CHECK(cond, rs, rt);
3062
3063 if (!L) {
3064 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3065 DCHECK(is_int26(offset));
3066 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3067 } else {
3068 DCHECK(is_int16(offset));
3069 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3070 }
3071 } else {
3072 DCHECK(offset == 0);
3073 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3074 return BranchShortHelperR6(0, L, cond, rs, rt);
3075 } else {
3076 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3077 }
3078 }
3079 return false;
3080}
3081
3082
3083void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3084 const Operand& rt, BranchDelaySlot bdslot) {
3085 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3086}
3087
3088
3089void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3090 const Operand& rt, BranchDelaySlot bdslot) {
3091 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3092}
3093
3094
3095void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003096 BranchAndLinkShort(offset, bdslot);
3097}
3098
3099
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003100void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3101 const Operand& rt, BranchDelaySlot bdslot) {
3102 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3103 DCHECK(is_near);
3104 USE(is_near);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003105}
3106
3107
3108void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3109 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003110 if (is_near_branch(L)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003111 BranchAndLinkShort(L, bdslot);
3112 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003113 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003114 }
3115 } else {
3116 if (is_trampoline_emitted()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003117 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003118 } else {
3119 BranchAndLinkShort(L, bdslot);
3120 }
3121 }
3122}
3123
3124
3125void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3126 const Operand& rt,
3127 BranchDelaySlot bdslot) {
3128 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003129 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003130 Label skip;
3131 Condition neg_cond = NegateCondition(cond);
3132 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003133 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003134 bind(&skip);
3135 }
3136 } else {
3137 if (is_trampoline_emitted()) {
3138 Label skip;
3139 Condition neg_cond = NegateCondition(cond);
3140 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003141 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003142 bind(&skip);
3143 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003144 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003145 }
3146 }
3147}
3148
3149
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003150void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3151 BranchDelaySlot bdslot) {
3152 DCHECK(L == nullptr || offset == 0);
3153 offset = GetOffset(offset, L, OffsetSize::kOffset16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003154 bal(offset);
3155
3156 // Emit a nop in the branch delay slot if required.
3157 if (bdslot == PROTECT)
3158 nop();
3159}
3160
3161
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003162void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3163 DCHECK(L == nullptr || offset == 0);
3164 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3165 balc(offset);
3166}
3167
3168
3169void MacroAssembler::BranchAndLinkShort(int32_t offset,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003170 BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003171 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3172 DCHECK(is_int26(offset));
3173 BranchAndLinkShortHelperR6(offset, nullptr);
3174 } else {
3175 DCHECK(is_int16(offset));
3176 BranchAndLinkShortHelper(offset, nullptr, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003177 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003178}
3179
3180
3181void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003182 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3183 BranchAndLinkShortHelperR6(0, L);
3184 } else {
3185 BranchAndLinkShortHelper(0, L, bdslot);
3186 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003187}
3188
3189
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003190bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3191 Condition cond, Register rs,
3192 const Operand& rt) {
3193 DCHECK(L == nullptr || offset == 0);
3194 Register scratch = rs.is(at) ? t8 : at;
3195 OffsetSize bits = OffsetSize::kOffset16;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003196
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003197 BlockTrampolinePoolScope block_trampoline_pool(this);
3198 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3199 switch (cond) {
3200 case cc_always:
3201 bits = OffsetSize::kOffset26;
3202 if (!is_near(L, bits)) return false;
3203 offset = GetOffset(offset, L, bits);
3204 balc(offset);
3205 break;
3206 case eq:
3207 if (!is_near(L, bits)) return false;
3208 Subu(scratch, rs, rt);
3209 offset = GetOffset(offset, L, bits);
3210 beqzalc(scratch, offset);
3211 break;
3212 case ne:
3213 if (!is_near(L, bits)) return false;
3214 Subu(scratch, rs, rt);
3215 offset = GetOffset(offset, L, bits);
3216 bnezalc(scratch, offset);
3217 break;
3218
3219 // Signed comparison.
3220 case greater:
3221 // rs > rt
3222 if (rs.code() == rt.rm_.reg_code) {
3223 break; // No code needs to be emitted.
3224 } else if (rs.is(zero_reg)) {
3225 if (!is_near(L, bits)) return false;
3226 scratch = GetRtAsRegisterHelper(rt, scratch);
3227 offset = GetOffset(offset, L, bits);
3228 bltzalc(scratch, offset);
3229 } else if (IsZero(rt)) {
3230 if (!is_near(L, bits)) return false;
3231 offset = GetOffset(offset, L, bits);
3232 bgtzalc(rs, offset);
3233 } else {
3234 if (!is_near(L, bits)) return false;
3235 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3236 offset = GetOffset(offset, L, bits);
3237 bnezalc(scratch, offset);
3238 }
3239 break;
3240 case greater_equal:
3241 // rs >= rt
3242 if (rs.code() == rt.rm_.reg_code) {
3243 bits = OffsetSize::kOffset26;
3244 if (!is_near(L, bits)) return false;
3245 offset = GetOffset(offset, L, bits);
3246 balc(offset);
3247 } else if (rs.is(zero_reg)) {
3248 if (!is_near(L, bits)) return false;
3249 scratch = GetRtAsRegisterHelper(rt, scratch);
3250 offset = GetOffset(offset, L, bits);
3251 blezalc(scratch, offset);
3252 } else if (IsZero(rt)) {
3253 if (!is_near(L, bits)) return false;
3254 offset = GetOffset(offset, L, bits);
3255 bgezalc(rs, offset);
3256 } else {
3257 if (!is_near(L, bits)) return false;
3258 Slt(scratch, rs, rt);
3259 offset = GetOffset(offset, L, bits);
3260 beqzalc(scratch, offset);
3261 }
3262 break;
3263 case less:
3264 // rs < rt
3265 if (rs.code() == rt.rm_.reg_code) {
3266 break; // No code needs to be emitted.
3267 } else if (rs.is(zero_reg)) {
3268 if (!is_near(L, bits)) return false;
3269 scratch = GetRtAsRegisterHelper(rt, scratch);
3270 offset = GetOffset(offset, L, bits);
3271 bgtzalc(scratch, offset);
3272 } else if (IsZero(rt)) {
3273 if (!is_near(L, bits)) return false;
3274 offset = GetOffset(offset, L, bits);
3275 bltzalc(rs, offset);
3276 } else {
3277 if (!is_near(L, bits)) return false;
3278 Slt(scratch, rs, rt);
3279 offset = GetOffset(offset, L, bits);
3280 bnezalc(scratch, offset);
3281 }
3282 break;
3283 case less_equal:
3284 // rs <= r2
3285 if (rs.code() == rt.rm_.reg_code) {
3286 bits = OffsetSize::kOffset26;
3287 if (!is_near(L, bits)) return false;
3288 offset = GetOffset(offset, L, bits);
3289 balc(offset);
3290 } else if (rs.is(zero_reg)) {
3291 if (!is_near(L, bits)) return false;
3292 scratch = GetRtAsRegisterHelper(rt, scratch);
3293 offset = GetOffset(offset, L, bits);
3294 bgezalc(scratch, offset);
3295 } else if (IsZero(rt)) {
3296 if (!is_near(L, bits)) return false;
3297 offset = GetOffset(offset, L, bits);
3298 blezalc(rs, offset);
3299 } else {
3300 if (!is_near(L, bits)) return false;
3301 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3302 offset = GetOffset(offset, L, bits);
3303 beqzalc(scratch, offset);
3304 }
3305 break;
3306
3307
3308 // Unsigned comparison.
3309 case Ugreater:
3310 // rs > r2
3311 if (!is_near(L, bits)) return false;
3312 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3313 offset = GetOffset(offset, L, bits);
3314 bnezalc(scratch, offset);
3315 break;
3316 case Ugreater_equal:
3317 // rs >= r2
3318 if (!is_near(L, bits)) return false;
3319 Sltu(scratch, rs, rt);
3320 offset = GetOffset(offset, L, bits);
3321 beqzalc(scratch, offset);
3322 break;
3323 case Uless:
3324 // rs < r2
3325 if (!is_near(L, bits)) return false;
3326 Sltu(scratch, rs, rt);
3327 offset = GetOffset(offset, L, bits);
3328 bnezalc(scratch, offset);
3329 break;
3330 case Uless_equal:
3331 // rs <= r2
3332 if (!is_near(L, bits)) return false;
3333 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3334 offset = GetOffset(offset, L, bits);
3335 beqzalc(scratch, offset);
3336 break;
3337 default:
3338 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003339 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003340 return true;
3341}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003342
3343
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003344// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3345// with the slt instructions. We could use sub or add instead but we would miss
3346// overflow cases, so we keep slt and add an intermediate third instruction.
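// For example, for 'greater' the sequence below computes scratch = (rt < rs),
// addiu turns that into 0 (condition holds) or -1 (it does not), and bgezal
// then links and branches exactly when rs > rt.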
3347bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3348 Condition cond, Register rs,
3349 const Operand& rt,
3350 BranchDelaySlot bdslot) {
3351 DCHECK(L == nullptr || offset == 0);
3352 if (!is_near(L, OffsetSize::kOffset16)) return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003353
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003354 Register scratch = t8;
3355 BlockTrampolinePoolScope block_trampoline_pool(this);
3356
3357 switch (cond) {
3358 case cc_always:
3359 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3360 bal(offset);
3361 break;
3362 case eq:
3363 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3364 nop();
3365 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3366 bal(offset);
3367 break;
3368 case ne:
3369 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3370 nop();
3371 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3372 bal(offset);
3373 break;
3374
3375 // Signed comparison.
3376 case greater:
3377 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3378 addiu(scratch, scratch, -1);
3379 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3380 bgezal(scratch, offset);
3381 break;
3382 case greater_equal:
3383 Slt(scratch, rs, rt);
3384 addiu(scratch, scratch, -1);
3385 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3386 bltzal(scratch, offset);
3387 break;
3388 case less:
3389 Slt(scratch, rs, rt);
3390 addiu(scratch, scratch, -1);
3391 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3392 bgezal(scratch, offset);
3393 break;
3394 case less_equal:
3395 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3396 addiu(scratch, scratch, -1);
3397 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3398 bltzal(scratch, offset);
3399 break;
3400
3401 // Unsigned comparison.
3402 case Ugreater:
3403 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3404 addiu(scratch, scratch, -1);
3405 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3406 bgezal(scratch, offset);
3407 break;
3408 case Ugreater_equal:
3409 Sltu(scratch, rs, rt);
3410 addiu(scratch, scratch, -1);
3411 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3412 bltzal(scratch, offset);
3413 break;
3414 case Uless:
3415 Sltu(scratch, rs, rt);
3416 addiu(scratch, scratch, -1);
3417 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3418 bgezal(scratch, offset);
3419 break;
3420 case Uless_equal:
3421 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3422 addiu(scratch, scratch, -1);
3423 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3424 bltzal(scratch, offset);
3425 break;
3426
3427 default:
3428 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003429 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003430
3431 // Emit a nop in the branch delay slot if required.
3432 if (bdslot == PROTECT)
3433 nop();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003434
3435 return true;
3436}
3437
3438
3439bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3440 Condition cond, Register rs,
3441 const Operand& rt,
3442 BranchDelaySlot bdslot) {
3443 BRANCH_ARGS_CHECK(cond, rs, rt);
3444
3445 if (!L) {
3446 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3447 DCHECK(is_int26(offset));
3448 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3449 } else {
3450 DCHECK(is_int16(offset));
3451 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3452 }
3453 } else {
3454 DCHECK(offset == 0);
3455 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3456 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3457 } else {
3458 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3459 }
3460 }
3461 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003462}
3463
3464
3465void MacroAssembler::Jump(Register target,
3466 Condition cond,
3467 Register rs,
3468 const Operand& rt,
3469 BranchDelaySlot bd) {
3470 BlockTrampolinePoolScope block_trampoline_pool(this);
3471 if (cond == cc_always) {
3472 jr(target);
3473 } else {
3474 BRANCH_ARGS_CHECK(cond, rs, rt);
3475 Branch(2, NegateCondition(cond), rs, rt);
3476 jr(target);
3477 }
3478 // Emit a nop in the branch delay slot if required.
3479 if (bd == PROTECT)
3480 nop();
3481}
3482
3483
3484void MacroAssembler::Jump(intptr_t target,
3485 RelocInfo::Mode rmode,
3486 Condition cond,
3487 Register rs,
3488 const Operand& rt,
3489 BranchDelaySlot bd) {
3490 Label skip;
3491 if (cond != cc_always) {
3492 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3493 }
3494 // The first instruction of 'li' may be placed in the delay slot.
3495 // This is not an issue, t9 is expected to be clobbered anyway.
3496 li(t9, Operand(target, rmode));
3497 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3498 bind(&skip);
3499}
3500
3501
3502void MacroAssembler::Jump(Address target,
3503 RelocInfo::Mode rmode,
3504 Condition cond,
3505 Register rs,
3506 const Operand& rt,
3507 BranchDelaySlot bd) {
3508 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3509 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3510}
3511
3512
3513void MacroAssembler::Jump(Handle<Code> code,
3514 RelocInfo::Mode rmode,
3515 Condition cond,
3516 Register rs,
3517 const Operand& rt,
3518 BranchDelaySlot bd) {
3519 DCHECK(RelocInfo::IsCodeTarget(rmode));
3520 AllowDeferredHandleDereference embedding_raw_address;
3521 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3522}
3523
3524
3525int MacroAssembler::CallSize(Register target,
3526 Condition cond,
3527 Register rs,
3528 const Operand& rt,
3529 BranchDelaySlot bd) {
3530 int size = 0;
3531
3532 if (cond == cc_always) {
3533 size += 1;
3534 } else {
3535 size += 3;
3536 }
3537
3538 if (bd == PROTECT)
3539 size += 1;
3540
3541 return size * kInstrSize;
3542}
3543
3544
3545// Note: To call gcc-compiled C code on mips, you must call thru t9.
3546void MacroAssembler::Call(Register target,
3547 Condition cond,
3548 Register rs,
3549 const Operand& rt,
3550 BranchDelaySlot bd) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003551#ifdef DEBUG
3552 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3553#endif
3554
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003555 BlockTrampolinePoolScope block_trampoline_pool(this);
3556 Label start;
3557 bind(&start);
3558 if (cond == cc_always) {
3559 jalr(target);
3560 } else {
3561 BRANCH_ARGS_CHECK(cond, rs, rt);
3562 Branch(2, NegateCondition(cond), rs, rt);
3563 jalr(target);
3564 }
3565 // Emit a nop in the branch delay slot if required.
3566 if (bd == PROTECT)
3567 nop();
3568
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003569#ifdef DEBUG
3570 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3571 SizeOfCodeGeneratedSince(&start));
3572#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003573}
3574
3575
3576int MacroAssembler::CallSize(Address target,
3577 RelocInfo::Mode rmode,
3578 Condition cond,
3579 Register rs,
3580 const Operand& rt,
3581 BranchDelaySlot bd) {
3582 int size = CallSize(t9, cond, rs, rt, bd);
3583 return size + 4 * kInstrSize;
3584}
3585
3586
3587void MacroAssembler::Call(Address target,
3588 RelocInfo::Mode rmode,
3589 Condition cond,
3590 Register rs,
3591 const Operand& rt,
3592 BranchDelaySlot bd) {
3593 BlockTrampolinePoolScope block_trampoline_pool(this);
3594 Label start;
3595 bind(&start);
3596 int64_t target_int = reinterpret_cast<int64_t>(target);
3597 // Must record previous source positions before the
3598 // li() generates a new code target.
3599 positions_recorder()->WriteRecordedPositions();
3600 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
3601 Call(t9, cond, rs, rt, bd);
3602 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3603 SizeOfCodeGeneratedSince(&start));
3604}
3605
3606
3607int MacroAssembler::CallSize(Handle<Code> code,
3608 RelocInfo::Mode rmode,
3609 TypeFeedbackId ast_id,
3610 Condition cond,
3611 Register rs,
3612 const Operand& rt,
3613 BranchDelaySlot bd) {
3614 AllowDeferredHandleDereference using_raw_address;
3615 return CallSize(reinterpret_cast<Address>(code.location()),
3616 rmode, cond, rs, rt, bd);
3617}
3618
3619
3620void MacroAssembler::Call(Handle<Code> code,
3621 RelocInfo::Mode rmode,
3622 TypeFeedbackId ast_id,
3623 Condition cond,
3624 Register rs,
3625 const Operand& rt,
3626 BranchDelaySlot bd) {
3627 BlockTrampolinePoolScope block_trampoline_pool(this);
3628 Label start;
3629 bind(&start);
3630 DCHECK(RelocInfo::IsCodeTarget(rmode));
3631 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3632 SetRecordedAstId(ast_id);
3633 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3634 }
3635 AllowDeferredHandleDereference embedding_raw_address;
3636 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3637 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3638 SizeOfCodeGeneratedSince(&start));
3639}
3640
3641
3642void MacroAssembler::Ret(Condition cond,
3643 Register rs,
3644 const Operand& rt,
3645 BranchDelaySlot bd) {
3646 Jump(ra, cond, rs, rt, bd);
3647}
3648
3649
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003650void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3651 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3652 (!L->is_bound() || is_near_r6(L))) {
3653 BranchShortHelperR6(0, L);
3654 } else {
3655 EmitForbiddenSlotInstruction();
3656 BlockTrampolinePoolScope block_trampoline_pool(this);
3657 {
3658 BlockGrowBufferScope block_buf_growth(this);
3659 // Buffer growth (and relocation) must be blocked for internal references
3660 // until associated instructions are emitted and available to be patched.
3661 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3662 j(L);
3663 }
3664 // Emit a nop in the branch delay slot if required.
3665 if (bdslot == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003666 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003667}
3668
3669
3670void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3671 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3672 (!L->is_bound() || is_near_r6(L))) {
3673 BranchAndLinkShortHelperR6(0, L);
3674 } else {
3675 EmitForbiddenSlotInstruction();
3676 BlockTrampolinePoolScope block_trampoline_pool(this);
3677 {
3678 BlockGrowBufferScope block_buf_growth(this);
3679 // Buffer growth (and relocation) must be blocked for internal references
3680 // until associated instructions are emitted and available to be patched.
3681 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3682 jal(L);
3683 }
3684 // Emit a nop in the branch delay slot if required.
3685 if (bdslot == PROTECT) nop();
3686 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003687}
3688
3689
3690void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3691 BlockTrampolinePoolScope block_trampoline_pool(this);
3692
3693 uint64_t imm64;
3694 imm64 = jump_address(L);
3695 { BlockGrowBufferScope block_buf_growth(this);
3696 // Buffer growth (and relocation) must be blocked for internal references
3697 // until associated instructions are emitted and available to be patched.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003698 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003699 li(at, Operand(imm64), ADDRESS_LOAD);
3700 }
3701 jr(at);
3702
3703 // Emit a nop in the branch delay slot if required.
3704 if (bdslot == PROTECT)
3705 nop();
3706}
3707
3708
3709void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3710 BlockTrampolinePoolScope block_trampoline_pool(this);
3711
3712 uint64_t imm64;
3713 imm64 = jump_address(L);
3714 { BlockGrowBufferScope block_buf_growth(this);
3715 // Buffer growth (and relocation) must be blocked for internal references
3716 // until associated instructions are emitted and available to be patched.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003717 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003718 li(at, Operand(imm64), ADDRESS_LOAD);
3719 }
3720 jalr(at);
3721
3722 // Emit a nop in the branch delay slot if required.
3723 if (bdslot == PROTECT)
3724 nop();
3725}
3726
3727
3728void MacroAssembler::DropAndRet(int drop) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003729 DCHECK(is_int16(drop * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003730 Ret(USE_DELAY_SLOT);
3731 daddiu(sp, sp, drop * kPointerSize);
3732}
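// DropAndRet relies on the MIPS branch delay slot: the daddiu that pops the
// stack is emitted after a return requested with USE_DELAY_SLOT, so it lands
// in the delay slot and still executes before control leaves the function.
// Rough shape of the sequence (illustrative only, not actual emitted output):
//   jr     ra                      // return
//   daddiu sp, sp, drop * 8        // executes in the delay slot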
3733
3734void MacroAssembler::DropAndRet(int drop,
3735 Condition cond,
3736 Register r1,
3737 const Operand& r2) {
3738 // Both Drop and Ret need to be conditional.
3739 Label skip;
3740 if (cond != cc_always) {
3741 Branch(&skip, NegateCondition(cond), r1, r2);
3742 }
3743
3744 Drop(drop);
3745 Ret();
3746
3747 if (cond != cc_always) {
3748 bind(&skip);
3749 }
3750}
3751
3752
3753void MacroAssembler::Drop(int count,
3754 Condition cond,
3755 Register reg,
3756 const Operand& op) {
3757 if (count <= 0) {
3758 return;
3759 }
3760
3761 Label skip;
3762
3763 if (cond != al) {
3764 Branch(&skip, NegateCondition(cond), reg, op);
3765 }
3766
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003767 Daddu(sp, sp, Operand(count * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003768
3769 if (cond != al) {
3770 bind(&skip);
3771 }
3772}
3773
3774
3775
3776void MacroAssembler::Swap(Register reg1,
3777 Register reg2,
3778 Register scratch) {
3779 if (scratch.is(no_reg)) {
3780 Xor(reg1, reg1, Operand(reg2));
3781 Xor(reg2, reg2, Operand(reg1));
3782 Xor(reg1, reg1, Operand(reg2));
3783 } else {
3784 mov(scratch, reg1);
3785 mov(reg1, reg2);
3786 mov(reg2, scratch);
3787 }
3788}
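// When no scratch register is given, Swap falls back to the classic
// three-XOR exchange, which needs no temporary but assumes reg1 and reg2 are
// distinct registers. A minimal sketch of the identity (illustrative only):
//   a ^= b;   // a == a0 ^ b0
//   b ^= a;   // b == a0
//   a ^= b;   // a == b0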
3789
3790
3791void MacroAssembler::Call(Label* target) {
3792 BranchAndLink(target);
3793}
3794
3795
3796void MacroAssembler::Push(Handle<Object> handle) {
3797 li(at, Operand(handle));
3798 push(at);
3799}
3800
3801
3802void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3803 DCHECK(!src.is(scratch));
3804 mov(scratch, src);
3805 dsrl32(src, src, 0);
3806 dsll32(src, src, 0);
3807 push(src);
3808 dsll32(scratch, scratch, 0);
3809 push(scratch);
3810}
3811
3812
3813void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3814 DCHECK(!dst.is(scratch));
3815 pop(scratch);
3816 dsrl32(scratch, scratch, 0);
3817 pop(dst);
3818 dsrl32(dst, dst, 0);
3819 dsll32(dst, dst, 0);
3820 or_(dst, dst, scratch);
3821}
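// PushRegisterAsTwoSmis/PopRegisterAsTwoSmis split a 64-bit value into its
// two 32-bit halves, each shifted into the upper word of a stack slot so the
// slot holds a valid smi rather than bits the GC could mistake for a pointer.
// Rough round trip, assuming unsigned 64-bit arithmetic (illustrative only):
//   push((src >> 32) << 32);          // high half as a smi
//   push(src << 32);                  // low half as a smi
//   ...
//   low  = pop() >> 32;
//   high = (pop() >> 32) << 32;
//   dst  = high | low;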
3822
3823
3824void MacroAssembler::DebugBreak() {
3825 PrepareCEntryArgs(0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003826 PrepareCEntryFunction(
3827 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003828 CEntryStub ces(isolate(), 1);
3829 DCHECK(AllowThisStubCall(&ces));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003830 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003831}
3832
3833
3834// ---------------------------------------------------------------------------
3835// Exception handling.
3836
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003837void MacroAssembler::PushStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003838 // Adjust this code if not the case.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003839 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003840 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003841
3842 // Link the current handler as the next handler.
3843 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3844 ld(a5, MemOperand(a6));
3845 push(a5);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003846
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003847 // Set this new handler as the current one.
3848 sd(sp, MemOperand(a6));
3849}
3850
3851
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003852void MacroAssembler::PopStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003853 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3854 pop(a1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003855 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
3856 kPointerSize)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003857 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3858 sd(a1, MemOperand(at));
3859}
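// The handler chain is a singly linked list threaded through the stack:
// PushStackHandler saves the current head below the new handler and publishes
// sp as the new head; PopStackHandler restores the saved head. Roughly
// (illustrative only):
//   push:  push(*handler_address);   *handler_address = sp;
//   pop:   *handler_address = pop(); sp += kSize - kPointerSize;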
3860
3861
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003862void MacroAssembler::Allocate(int object_size,
3863 Register result,
3864 Register scratch1,
3865 Register scratch2,
3866 Label* gc_required,
3867 AllocationFlags flags) {
3868 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3869 if (!FLAG_inline_new) {
3870 if (emit_debug_code()) {
3871 // Trash the registers to simulate an allocation failure.
3872 li(result, 0x7091);
3873 li(scratch1, 0x7191);
3874 li(scratch2, 0x7291);
3875 }
3876 jmp(gc_required);
3877 return;
3878 }
3879
Ben Murdoch097c5b22016-05-18 11:27:45 +01003880 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003881
3882 // Make object size into bytes.
3883 if ((flags & SIZE_IN_WORDS) != 0) {
3884 object_size *= kPointerSize;
3885 }
3886 DCHECK(0 == (object_size & kObjectAlignmentMask));
3887
3888 // Check relative positions of allocation top and limit addresses.
3889 // ARM adds additional checks to make sure the ldm instruction can be
3890 // used. On MIPS we don't have ldm so we don't need additional checks either.
3891 ExternalReference allocation_top =
3892 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3893 ExternalReference allocation_limit =
3894 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3895
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003896 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3897 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003898 DCHECK((limit - top) == kPointerSize);
3899
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003900 // Set up allocation top address and allocation limit registers.
3901 Register top_address = scratch1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003902 // This code stores a temporary value in t9.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003903 Register alloc_limit = t9;
3904 Register result_end = scratch2;
3905 li(top_address, Operand(allocation_top));
3906
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003907 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003908 // Load allocation top into result and allocation limit into alloc_limit.
3909 ld(result, MemOperand(top_address));
3910 ld(alloc_limit, MemOperand(top_address, kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003911 } else {
3912 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003913 // Assert that result actually contains top on entry.
3914 ld(alloc_limit, MemOperand(top_address));
3915 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003916 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003917 // Load allocation limit. Result already contains allocation top.
3918 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003919 }
3920
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003921 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3922 // the same alignment on MIPS64.
3923 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3924
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003925 if (emit_debug_code()) {
3926 And(at, result, Operand(kDoubleAlignmentMask));
3927 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3928 }
3929
3930 // Calculate new top and bail out if new space is exhausted. Use result
3931 // to calculate the new top.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003932 Daddu(result_end, result, Operand(object_size));
3933 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
3934 sd(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003935
3936 // Tag object if requested.
3937 if ((flags & TAG_OBJECT) != 0) {
3938 Daddu(result, result, Operand(kHeapObjectTag));
3939 }
3940}
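// Both Allocate variants are bump-pointer allocations against the new-space
// top/limit pair, which are kept next to each other in memory. A rough
// C-style sketch of the fast path (illustrative only; 'top' and 'limit' stand
// for the external references loaded above):
//   result = *top;
//   result_end = result + object_size;
//   if (result_end > *limit) goto gc_required;   // unsigned compare
//   *top = result_end;
//   if (flags & TAG_OBJECT) result += kHeapObjectTag;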
3941
3942
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003943void MacroAssembler::Allocate(Register object_size, Register result,
3944 Register result_end, Register scratch,
3945 Label* gc_required, AllocationFlags flags) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003946 if (!FLAG_inline_new) {
3947 if (emit_debug_code()) {
3948 // Trash the registers to simulate an allocation failure.
3949 li(result, 0x7091);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003950 li(scratch, 0x7191);
3951 li(result_end, 0x7291);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003952 }
3953 jmp(gc_required);
3954 return;
3955 }
3956
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003957 // |object_size| and |result_end| may overlap, other registers must not.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003958 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
3959 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003960
3961 // Check relative positions of allocation top and limit addresses.
3962 // ARM adds additional checks to make sure the ldm instruction can be
3963 // used. On MIPS we don't have ldm so we don't need additional checks either.
3964 ExternalReference allocation_top =
3965 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3966 ExternalReference allocation_limit =
3967 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003968 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3969 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003970 DCHECK((limit - top) == kPointerSize);
3971
3972 // Set up allocation top address and object size registers.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003973 Register top_address = scratch;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003974 // This code stores a temporary value in t9.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003975 Register alloc_limit = t9;
3976 li(top_address, Operand(allocation_top));
3977
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003978 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003979 // Load allocation top into result and allocation limit into alloc_limit.
3980 ld(result, MemOperand(top_address));
3981 ld(alloc_limit, MemOperand(top_address, kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003982 } else {
3983 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003984 // Assert that result actually contains top on entry.
3985 ld(alloc_limit, MemOperand(top_address));
3986 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003987 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003988 // Load allocation limit. Result already contains allocation top.
3989 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003990 }
3991
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003992 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3993 // the same alignment on MIPS64.
3994 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3995
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003996 if (emit_debug_code()) {
3997 And(at, result, Operand(kDoubleAlignmentMask));
3998 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3999 }
4000
4001 // Calculate new top and bail out if new space is exhausted. Use result
4002 // to calculate the new top. Object size may be in words so a shift is
4003 // required to get the number of bytes.
4004 if ((flags & SIZE_IN_WORDS) != 0) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004005 Dlsa(result_end, result, object_size, kPointerSizeLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004006 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004007 Daddu(result_end, result, Operand(object_size));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004008 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004009 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004010
4011 // Update allocation top. result temporarily holds the new top.
4012 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004013 And(at, result_end, Operand(kObjectAlignmentMask));
4014 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004015 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004016 sd(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004017
4018 // Tag object if requested.
4019 if ((flags & TAG_OBJECT) != 0) {
4020 Daddu(result, result, Operand(kHeapObjectTag));
4021 }
4022}
4023
4024
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004025void MacroAssembler::AllocateTwoByteString(Register result,
4026 Register length,
4027 Register scratch1,
4028 Register scratch2,
4029 Register scratch3,
4030 Label* gc_required) {
4031 // Calculate the number of bytes needed for the characters in the string while
4032 // observing object alignment.
4033 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4034 dsll(scratch1, length, 1); // Length in bytes, not chars.
4035 daddiu(scratch1, scratch1,
4036 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4037 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4038
4039 // Allocate two-byte string in new space.
4040 Allocate(scratch1,
4041 result,
4042 scratch2,
4043 scratch3,
4044 gc_required,
4045 TAG_OBJECT);
4046
4047 // Set the map, length and hash field.
4048 InitializeNewString(result,
4049 length,
4050 Heap::kStringMapRootIndex,
4051 scratch1,
4052 scratch2);
4053}
4054
4055
4056void MacroAssembler::AllocateOneByteString(Register result, Register length,
4057 Register scratch1, Register scratch2,
4058 Register scratch3,
4059 Label* gc_required) {
4060 // Calculate the number of bytes needed for the characters in the string
4061 // while observing object alignment.
4062 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4063 DCHECK(kCharSize == 1);
4064 daddiu(scratch1, length,
4065 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4066 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4067
4068 // Allocate one-byte string in new space.
4069 Allocate(scratch1,
4070 result,
4071 scratch2,
4072 scratch3,
4073 gc_required,
4074 TAG_OBJECT);
4075
4076 // Set the map, length and hash field.
4077 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4078 scratch1, scratch2);
4079}
4080
4081
4082void MacroAssembler::AllocateTwoByteConsString(Register result,
4083 Register length,
4084 Register scratch1,
4085 Register scratch2,
4086 Label* gc_required) {
4087 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4088 TAG_OBJECT);
4089 InitializeNewString(result,
4090 length,
4091 Heap::kConsStringMapRootIndex,
4092 scratch1,
4093 scratch2);
4094}
4095
4096
4097void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4098 Register scratch1,
4099 Register scratch2,
4100 Label* gc_required) {
4101 Allocate(ConsString::kSize,
4102 result,
4103 scratch1,
4104 scratch2,
4105 gc_required,
4106 TAG_OBJECT);
4107
4108 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4109 scratch1, scratch2);
4110}
4111
4112
4113void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4114 Register length,
4115 Register scratch1,
4116 Register scratch2,
4117 Label* gc_required) {
4118 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4119 TAG_OBJECT);
4120
4121 InitializeNewString(result,
4122 length,
4123 Heap::kSlicedStringMapRootIndex,
4124 scratch1,
4125 scratch2);
4126}
4127
4128
4129void MacroAssembler::AllocateOneByteSlicedString(Register result,
4130 Register length,
4131 Register scratch1,
4132 Register scratch2,
4133 Label* gc_required) {
4134 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4135 TAG_OBJECT);
4136
4137 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4138 scratch1, scratch2);
4139}
4140
4141
4142void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4143 Label* not_unique_name) {
4144 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4145 Label succeed;
4146 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4147 Branch(&succeed, eq, at, Operand(zero_reg));
4148 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4149
4150 bind(&succeed);
4151}
4152
4153
4154// Allocates a heap number or jumps to the label if the young space is full and
4155// a scavenge is needed.
4156void MacroAssembler::AllocateHeapNumber(Register result,
4157 Register scratch1,
4158 Register scratch2,
4159 Register heap_number_map,
4160 Label* need_gc,
4161 TaggingMode tagging_mode,
4162 MutableMode mode) {
4163 // Allocate an object in the heap for the heap number and tag it as a heap
4164 // object.
4165 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4166 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
4167
4168 Heap::RootListIndex map_index = mode == MUTABLE
4169 ? Heap::kMutableHeapNumberMapRootIndex
4170 : Heap::kHeapNumberMapRootIndex;
4171 AssertIsRoot(heap_number_map, map_index);
4172
4173 // Store heap number map in the allocated object.
4174 if (tagging_mode == TAG_RESULT) {
4175 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4176 } else {
4177 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
4178 }
4179}
4180
4181
4182void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4183 FPURegister value,
4184 Register scratch1,
4185 Register scratch2,
4186 Label* gc_required) {
4187 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4188 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4189 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4190}
4191
4192
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004193void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4194 Register value, Register scratch1,
4195 Register scratch2, Label* gc_required) {
4196 DCHECK(!result.is(constructor));
4197 DCHECK(!result.is(scratch1));
4198 DCHECK(!result.is(scratch2));
4199 DCHECK(!result.is(value));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004200
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004201 // Allocate JSValue in new space.
4202 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004203
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004204 // Initialize the JSValue.
4205 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4206 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4207 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4208 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4209 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4210 sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4211 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004212}
4213
4214
4215void MacroAssembler::CopyBytes(Register src,
4216 Register dst,
4217 Register length,
4218 Register scratch) {
4219 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
4220
4221 // Align src before copying in word size chunks.
4222 Branch(&byte_loop, le, length, Operand(kPointerSize));
4223 bind(&align_loop_1);
4224 And(scratch, src, kPointerSize - 1);
4225 Branch(&word_loop, eq, scratch, Operand(zero_reg));
4226 lbu(scratch, MemOperand(src));
4227 Daddu(src, src, 1);
4228 sb(scratch, MemOperand(dst));
4229 Daddu(dst, dst, 1);
4230 Dsubu(length, length, Operand(1));
4231 Branch(&align_loop_1, ne, length, Operand(zero_reg));
4232
4233 // Copy bytes in word size chunks.
4234 bind(&word_loop);
4235 if (emit_debug_code()) {
4236 And(scratch, src, kPointerSize - 1);
4237 Assert(eq, kExpectingAlignmentForCopyBytes,
4238 scratch, Operand(zero_reg));
4239 }
4240 Branch(&byte_loop, lt, length, Operand(kPointerSize));
4241 ld(scratch, MemOperand(src));
4242 Daddu(src, src, kPointerSize);
4243
4244 // TODO(kalmard) check if this can be optimized to use sw in most cases.
4245 // Can't use unaligned access - copy byte by byte.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004246 if (kArchEndian == kLittle) {
4247 sb(scratch, MemOperand(dst, 0));
4248 dsrl(scratch, scratch, 8);
4249 sb(scratch, MemOperand(dst, 1));
4250 dsrl(scratch, scratch, 8);
4251 sb(scratch, MemOperand(dst, 2));
4252 dsrl(scratch, scratch, 8);
4253 sb(scratch, MemOperand(dst, 3));
4254 dsrl(scratch, scratch, 8);
4255 sb(scratch, MemOperand(dst, 4));
4256 dsrl(scratch, scratch, 8);
4257 sb(scratch, MemOperand(dst, 5));
4258 dsrl(scratch, scratch, 8);
4259 sb(scratch, MemOperand(dst, 6));
4260 dsrl(scratch, scratch, 8);
4261 sb(scratch, MemOperand(dst, 7));
4262 } else {
4263 sb(scratch, MemOperand(dst, 7));
4264 dsrl(scratch, scratch, 8);
4265 sb(scratch, MemOperand(dst, 6));
4266 dsrl(scratch, scratch, 8);
4267 sb(scratch, MemOperand(dst, 5));
4268 dsrl(scratch, scratch, 8);
4269 sb(scratch, MemOperand(dst, 4));
4270 dsrl(scratch, scratch, 8);
4271 sb(scratch, MemOperand(dst, 3));
4272 dsrl(scratch, scratch, 8);
4273 sb(scratch, MemOperand(dst, 2));
4274 dsrl(scratch, scratch, 8);
4275 sb(scratch, MemOperand(dst, 1));
4276 dsrl(scratch, scratch, 8);
4277 sb(scratch, MemOperand(dst, 0));
4278 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004279 Daddu(dst, dst, 8);
4280
4281 Dsubu(length, length, Operand(kPointerSize));
4282 Branch(&word_loop);
4283
4284 // Copy the last bytes if any left.
4285 bind(&byte_loop);
4286 Branch(&done, eq, length, Operand(zero_reg));
4287 bind(&byte_loop_1);
4288 lbu(scratch, MemOperand(src));
4289 Daddu(src, src, 1);
4290 sb(scratch, MemOperand(dst));
4291 Daddu(dst, dst, 1);
4292 Dsubu(length, length, Operand(1));
4293 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
4294 bind(&done);
4295}
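// CopyBytes behaves like a small memcpy: it copies single bytes until src is
// pointer aligned, then copies eight bytes per iteration (stored byte by byte
// because dst may stay unaligned), and finishes with the remaining bytes.
// Rough C equivalent (illustrative only):
//   if (length > kPointerSize)
//     while (src % kPointerSize) { *dst++ = *src++; --length; }
//   while (length >= kPointerSize) { /* load word, store 8 bytes */ length -= 8; }
//   while (length-- > 0) *dst++ = *src++;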
4296
4297
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004298void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4299 Register end_address,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004300 Register filler) {
4301 Label loop, entry;
4302 Branch(&entry);
4303 bind(&loop);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004304 sd(filler, MemOperand(current_address));
4305 Daddu(current_address, current_address, kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004306 bind(&entry);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004307 Branch(&loop, ult, current_address, Operand(end_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004308}
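// InitializeFieldsWithFiller is a plain pointer-sized fill loop, roughly
// (illustrative only):
//   for (Address p = current_address; p < end_address; p += kPointerSize)
//     *reinterpret_cast<Object**>(p) = filler;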
4309
4310
4311void MacroAssembler::CheckFastElements(Register map,
4312 Register scratch,
4313 Label* fail) {
4314 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4315 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4316 STATIC_ASSERT(FAST_ELEMENTS == 2);
4317 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4318 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4319 Branch(fail, hi, scratch,
4320 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4321}
4322
4323
4324void MacroAssembler::CheckFastObjectElements(Register map,
4325 Register scratch,
4326 Label* fail) {
4327 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4328 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4329 STATIC_ASSERT(FAST_ELEMENTS == 2);
4330 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4331 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4332 Branch(fail, ls, scratch,
4333 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4334 Branch(fail, hi, scratch,
4335 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4336}
4337
4338
4339void MacroAssembler::CheckFastSmiElements(Register map,
4340 Register scratch,
4341 Label* fail) {
4342 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4343 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4344 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4345 Branch(fail, hi, scratch,
4346 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4347}
4348
4349
4350void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4351 Register key_reg,
4352 Register elements_reg,
4353 Register scratch1,
4354 Register scratch2,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004355 Label* fail,
4356 int elements_offset) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004357 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
4358 Label smi_value, done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004359
4360 // Handle smi values specially.
4361 JumpIfSmi(value_reg, &smi_value);
4362
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004363 // Ensure that the object is a heap number.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004364 CheckMap(value_reg,
4365 scratch1,
4366 Heap::kHeapNumberMapRootIndex,
4367 fail,
4368 DONT_DO_SMI_CHECK);
4369
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004370 // Double value, turn potential sNaN into qNaN.
4371 DoubleRegister double_result = f0;
4372 DoubleRegister double_scratch = f2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004373
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004374 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4375 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4376 FPUCanonicalizeNaN(double_result, double_result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004377
4378 bind(&smi_value);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004379 // Untag and transfer.
4380 dsrl32(scratch1, value_reg, 0);
4381 mtc1(scratch1, double_scratch);
4382 cvt_d_w(double_result, double_scratch);
4383
4384 bind(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004385 Daddu(scratch1, elements_reg,
4386 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4387 elements_offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004388 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
4389 Daddu(scratch1, scratch1, scratch2);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004390 // scratch1 is now effective address of the double element.
4391 sdc1(double_result, MemOperand(scratch1, 0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004392}
4393
4394
4395void MacroAssembler::CompareMapAndBranch(Register obj,
4396 Register scratch,
4397 Handle<Map> map,
4398 Label* early_success,
4399 Condition cond,
4400 Label* branch_to) {
4401 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4402 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4403}
4404
4405
4406void MacroAssembler::CompareMapAndBranch(Register obj_map,
4407 Handle<Map> map,
4408 Label* early_success,
4409 Condition cond,
4410 Label* branch_to) {
4411 Branch(branch_to, cond, obj_map, Operand(map));
4412}
4413
4414
4415void MacroAssembler::CheckMap(Register obj,
4416 Register scratch,
4417 Handle<Map> map,
4418 Label* fail,
4419 SmiCheckType smi_check_type) {
4420 if (smi_check_type == DO_SMI_CHECK) {
4421 JumpIfSmi(obj, fail);
4422 }
4423 Label success;
4424 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4425 bind(&success);
4426}
4427
4428
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004429void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4430 Register scratch2, Handle<WeakCell> cell,
4431 Handle<Code> success,
4432 SmiCheckType smi_check_type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004433 Label fail;
4434 if (smi_check_type == DO_SMI_CHECK) {
4435 JumpIfSmi(obj, &fail);
4436 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004437 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4438 GetWeakValue(scratch2, cell);
4439 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004440 bind(&fail);
4441}
4442
4443
4444void MacroAssembler::CheckMap(Register obj,
4445 Register scratch,
4446 Heap::RootListIndex index,
4447 Label* fail,
4448 SmiCheckType smi_check_type) {
4449 if (smi_check_type == DO_SMI_CHECK) {
4450 JumpIfSmi(obj, fail);
4451 }
4452 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4453 LoadRoot(at, index);
4454 Branch(fail, ne, scratch, Operand(at));
4455}
4456
4457
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004458void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4459 li(value, Operand(cell));
4460 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4461}
4462
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004463void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4464 const DoubleRegister src) {
4465 sub_d(dst, src, kDoubleRegZero);
4466}
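// FPUCanonicalizeNaN lets the FPU do the quieting: any arithmetic operation
// on a signaling NaN yields a quiet NaN, and subtracting +0.0 leaves every
// other double (including -0.0) unchanged. Roughly (illustrative only):
//   dst = src - 0.0;   // sNaN -> qNaN, all other values pass through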
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004467
4468void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4469 Label* miss) {
4470 GetWeakValue(value, cell);
4471 JumpIfSmi(value, miss);
4472}
4473
4474
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004475void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4476 if (IsMipsSoftFloatABI) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004477 if (kArchEndian == kLittle) {
4478 Move(dst, v0, v1);
4479 } else {
4480 Move(dst, v1, v0);
4481 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004482 } else {
4483 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4484 }
4485}
4486
4487
4488void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4489 if (IsMipsSoftFloatABI) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004490 if (kArchEndian == kLittle) {
4491 Move(dst, a0, a1);
4492 } else {
4493 Move(dst, a1, a0);
4494 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004495 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004496 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004497 }
4498}
4499
4500
4501void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4502 if (!IsMipsSoftFloatABI) {
4503 Move(f12, src);
4504 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004505 if (kArchEndian == kLittle) {
4506 Move(a0, a1, src);
4507 } else {
4508 Move(a1, a0, src);
4509 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004510 }
4511}
4512
4513
4514void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4515 if (!IsMipsSoftFloatABI) {
4516 Move(f0, src);
4517 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004518 if (kArchEndian == kLittle) {
4519 Move(v0, v1, src);
4520 } else {
4521 Move(v1, v0, src);
4522 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004523 }
4524}
4525
4526
4527void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4528 DoubleRegister src2) {
4529 if (!IsMipsSoftFloatABI) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004530 const DoubleRegister fparg2 = f13;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004531 if (src2.is(f12)) {
4532 DCHECK(!src1.is(fparg2));
4533 Move(fparg2, src2);
4534 Move(f12, src1);
4535 } else {
4536 Move(f12, src1);
4537 Move(fparg2, src2);
4538 }
4539 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004540 if (kArchEndian == kLittle) {
4541 Move(a0, a1, src1);
4542 Move(a2, a3, src2);
4543 } else {
4544 Move(a1, a0, src1);
4545 Move(a3, a2, src2);
4546 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004547 }
4548}
4549
4550
4551// -----------------------------------------------------------------------------
4552// JavaScript invokes.
4553
4554void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4555 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004556 Label* done,
4557 bool* definitely_mismatches,
4558 InvokeFlag flag,
4559 const CallWrapper& call_wrapper) {
4560 bool definitely_matches = false;
4561 *definitely_mismatches = false;
4562 Label regular_invoke;
4563
4564 // Check whether the expected and actual arguments count match. If not,
4565 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
4566 // a0: actual arguments count
4567 // a1: function (passed through to callee)
4568 // a2: expected arguments count
4569
4570 // The code below is made a lot easier because the calling code already sets
4571 // up actual and expected registers according to the contract if values are
4572 // passed in registers.
4573 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4574 DCHECK(expected.is_immediate() || expected.reg().is(a2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004575
4576 if (expected.is_immediate()) {
4577 DCHECK(actual.is_immediate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004578 li(a0, Operand(actual.immediate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004579 if (expected.immediate() == actual.immediate()) {
4580 definitely_matches = true;
4581 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004582 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4583 if (expected.immediate() == sentinel) {
4584 // Don't worry about adapting arguments for builtins that
4585 // don't want that done. Skip adaptation code by making it look
4586 // like we have a match between expected and actual number of
4587 // arguments.
4588 definitely_matches = true;
4589 } else {
4590 *definitely_mismatches = true;
4591 li(a2, Operand(expected.immediate()));
4592 }
4593 }
4594 } else if (actual.is_immediate()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004595 li(a0, Operand(actual.immediate()));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004596 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004597 } else {
4598 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4599 }
4600
4601 if (!definitely_matches) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004602 Handle<Code> adaptor =
4603 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4604 if (flag == CALL_FUNCTION) {
4605 call_wrapper.BeforeCall(CallSize(adaptor));
4606 Call(adaptor);
4607 call_wrapper.AfterCall();
4608 if (!*definitely_mismatches) {
4609 Branch(done);
4610 }
4611 } else {
4612 Jump(adaptor, RelocInfo::CODE_TARGET);
4613 }
4614 bind(&regular_invoke);
4615 }
4616}
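// InvokePrologue has three outcomes: the argument counts are statically known
// to match (fall straight through), statically known to differ (always go
// through the ArgumentsAdaptorTrampoline), or only known at run time, in
// which case the adaptor is skipped when a0 and a2 compare equal.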
4617
4618
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004619void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4620 const ParameterCount& expected,
4621 const ParameterCount& actual) {
4622 Label skip_flooding;
4623 ExternalReference step_in_enabled =
4624 ExternalReference::debug_step_in_enabled_address(isolate());
4625 li(t0, Operand(step_in_enabled));
4626 lb(t0, MemOperand(t0));
4627 Branch(&skip_flooding, eq, t0, Operand(zero_reg));
4628 {
4629 FrameScope frame(this,
4630 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4631 if (expected.is_reg()) {
4632 SmiTag(expected.reg());
4633 Push(expected.reg());
4634 }
4635 if (actual.is_reg()) {
4636 SmiTag(actual.reg());
4637 Push(actual.reg());
4638 }
4639 if (new_target.is_valid()) {
4640 Push(new_target);
4641 }
4642 Push(fun);
4643 Push(fun);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004644 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004645 Pop(fun);
4646 if (new_target.is_valid()) {
4647 Pop(new_target);
4648 }
4649 if (actual.is_reg()) {
4650 Pop(actual.reg());
4651 SmiUntag(actual.reg());
4652 }
4653 if (expected.is_reg()) {
4654 Pop(expected.reg());
4655 SmiUntag(expected.reg());
4656 }
4657 }
4658 bind(&skip_flooding);
4659}
4660
4661
4662void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4663 const ParameterCount& expected,
4664 const ParameterCount& actual,
4665 InvokeFlag flag,
4666 const CallWrapper& call_wrapper) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004667 // You can't call a function without a valid frame.
4668 DCHECK(flag == JUMP_FUNCTION || has_frame());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004669 DCHECK(function.is(a1));
4670 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
4671
4672 if (call_wrapper.NeedsDebugStepCheck()) {
4673 FloodFunctionIfStepping(function, new_target, expected, actual);
4674 }
4675
4676 // Clear the new.target register if not given.
4677 if (!new_target.is_valid()) {
4678 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
4679 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004680
4681 Label done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004682 bool definitely_mismatches = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004683 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004684 call_wrapper);
4685 if (!definitely_mismatches) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004686 // We call indirectly through the code field in the function to
4687 // allow recompilation to take effect without changing any of the
4688 // call sites.
4689 Register code = t0;
4690 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004691 if (flag == CALL_FUNCTION) {
4692 call_wrapper.BeforeCall(CallSize(code));
4693 Call(code);
4694 call_wrapper.AfterCall();
4695 } else {
4696 DCHECK(flag == JUMP_FUNCTION);
4697 Jump(code);
4698 }
4699 // Continue here if InvokePrologue does handle the invocation due to
4700 // mismatched parameter counts.
4701 bind(&done);
4702 }
4703}
4704
4705
4706void MacroAssembler::InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004707 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004708 const ParameterCount& actual,
4709 InvokeFlag flag,
4710 const CallWrapper& call_wrapper) {
4711 // You can't call a function without a valid frame.
4712 DCHECK(flag == JUMP_FUNCTION || has_frame());
4713
4714 // Contract with called JS functions requires that function is passed in a1.
4715 DCHECK(function.is(a1));
4716 Register expected_reg = a2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004717 Register temp_reg = t0;
4718 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004719 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4720 // The argument count is stored as int32_t on 64-bit platforms.
4721 // TODO(plind): Smi on 32-bit platforms.
4722 lw(expected_reg,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004723 FieldMemOperand(temp_reg,
4724 SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004725 ParameterCount expected(expected_reg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004726 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004727}
4728
4729
4730void MacroAssembler::InvokeFunction(Register function,
4731 const ParameterCount& expected,
4732 const ParameterCount& actual,
4733 InvokeFlag flag,
4734 const CallWrapper& call_wrapper) {
4735 // You can't call a function without a valid frame.
4736 DCHECK(flag == JUMP_FUNCTION || has_frame());
4737
4738 // Contract with called JS functions requires that function is passed in a1.
4739 DCHECK(function.is(a1));
4740
4741 // Get the function and setup the context.
4742 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4743
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004744 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004745}
4746
4747
4748void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4749 const ParameterCount& expected,
4750 const ParameterCount& actual,
4751 InvokeFlag flag,
4752 const CallWrapper& call_wrapper) {
4753 li(a1, function);
4754 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4755}
4756
4757
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004758void MacroAssembler::IsObjectJSStringType(Register object,
4759 Register scratch,
4760 Label* fail) {
4761 DCHECK(kNotStringTag != 0);
4762
4763 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4764 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4765 And(scratch, scratch, Operand(kIsNotStringMask));
4766 Branch(fail, ne, scratch, Operand(zero_reg));
4767}
4768
4769
4770void MacroAssembler::IsObjectNameType(Register object,
4771 Register scratch,
4772 Label* fail) {
4773 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4774 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4775 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4776}
4777
4778
4779// ---------------------------------------------------------------------------
4780// Support functions.
4781
4782
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004783void MacroAssembler::GetMapConstructor(Register result, Register map,
4784 Register temp, Register temp2) {
4785 Label done, loop;
4786 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4787 bind(&loop);
4788 JumpIfSmi(result, &done);
4789 GetObjectType(result, temp, temp2);
4790 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4791 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4792 Branch(&loop);
4793 bind(&done);
4794}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004795
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004796
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004797void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4798 Register scratch, Label* miss) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004799 // Get the prototype or initial map from the function.
4800 ld(result,
4801 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4802
4803 // If the prototype or initial map is the hole, don't return it and
4804 // simply miss the cache instead. This will allow us to allocate a
4805 // prototype object on-demand in the runtime system.
4806 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4807 Branch(miss, eq, result, Operand(t8));
4808
4809 // If the function does not have an initial map, we're done.
4810 Label done;
4811 GetObjectType(result, scratch, scratch);
4812 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4813
4814 // Get the prototype from the initial map.
4815 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4816
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004817 // All done.
4818 bind(&done);
4819}
4820
4821
4822void MacroAssembler::GetObjectType(Register object,
4823 Register map,
4824 Register type_reg) {
4825 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4826 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4827}
4828
4829
4830// -----------------------------------------------------------------------------
4831// Runtime calls.
4832
4833void MacroAssembler::CallStub(CodeStub* stub,
4834 TypeFeedbackId ast_id,
4835 Condition cond,
4836 Register r1,
4837 const Operand& r2,
4838 BranchDelaySlot bd) {
4839 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4840 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4841 cond, r1, r2, bd);
4842}
4843
4844
4845void MacroAssembler::TailCallStub(CodeStub* stub,
4846 Condition cond,
4847 Register r1,
4848 const Operand& r2,
4849 BranchDelaySlot bd) {
4850 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4851}
4852
4853
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004854bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4855 return has_frame_ || !stub->SometimesSetsUpAFrame();
4856}
4857
4858
4859void MacroAssembler::IndexFromHash(Register hash, Register index) {
4860 // If the hash field contains an array index, pick it out. The assert checks
4861 // that the constants for the maximum number of digits for an array index
4862 // cached in the hash field and the number of bits reserved for it do not
4863 // conflict.
4864 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4865 (1 << String::kArrayIndexValueBits));
4866 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4867}
4868
4869
4870void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4871 FPURegister result,
4872 Register scratch1,
4873 Register scratch2,
4874 Register heap_number_map,
4875 Label* not_number,
4876 ObjectToDoubleFlags flags) {
4877 Label done;
4878 if ((flags & OBJECT_NOT_SMI) == 0) {
4879 Label not_smi;
4880 JumpIfNotSmi(object, &not_smi);
4881 // Remove smi tag and convert to double.
4882 // dsra(scratch1, object, kSmiTagSize);
4883 dsra32(scratch1, object, 0);
4884 mtc1(scratch1, result);
4885 cvt_d_w(result, result);
4886 Branch(&done);
4887 bind(&not_smi);
4888 }
4889 // Check for heap number and load double value from it.
4890 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4891 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4892
4893 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4894 // If exponent is all ones the number is either a NaN or +/-Infinity.
4895 Register exponent = scratch1;
4896 Register mask_reg = scratch2;
4897 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4898 li(mask_reg, HeapNumber::kExponentMask);
4899
4900 And(exponent, exponent, mask_reg);
4901 Branch(not_number, eq, exponent, Operand(mask_reg));
4902 }
4903 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4904 bind(&done);
4905}
4906
4907
4908void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4909 FPURegister value,
4910 Register scratch1) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004911 dsra32(scratch1, smi, 0);
4912 mtc1(scratch1, value);
4913 cvt_d_w(value, value);
4914}
4915
4916
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004917void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4918 const Operand& right,
4919 Register overflow_dst,
4920 Register scratch) {
4921 if (right.is_reg()) {
4922 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4923 } else {
4924 if (dst.is(left)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004925 li(t9, right); // Load right.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004926 mov(scratch, left); // Preserve left.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004927 addu(dst, left, t9); // Left is overwritten.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004928 xor_(scratch, dst, scratch); // Original left.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004929 xor_(overflow_dst, dst, t9);
4930 and_(overflow_dst, overflow_dst, scratch);
4931 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004932 li(t9, right);
4933 addu(dst, left, t9);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004934 xor_(overflow_dst, dst, left);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004935 xor_(scratch, dst, t9);
4936 and_(overflow_dst, scratch, overflow_dst);
4937 }
4938 }
4939}
4940
4941
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004942void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004943 Register right,
4944 Register overflow_dst,
4945 Register scratch) {
4946 DCHECK(!dst.is(overflow_dst));
4947 DCHECK(!dst.is(scratch));
4948 DCHECK(!overflow_dst.is(scratch));
4949 DCHECK(!overflow_dst.is(left));
4950 DCHECK(!overflow_dst.is(right));
4951
4952 if (left.is(right) && dst.is(left)) {
4953 DCHECK(!dst.is(t9));
4954 DCHECK(!scratch.is(t9));
4955 DCHECK(!left.is(t9));
4956 DCHECK(!right.is(t9));
4957 DCHECK(!overflow_dst.is(t9));
4958 mov(t9, right);
4959 right = t9;
4960 }
4961
4962 if (dst.is(left)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004963 mov(scratch, left); // Preserve left.
4964 addu(dst, left, right); // Left is overwritten.
4965 xor_(scratch, dst, scratch); // Original left.
4966 xor_(overflow_dst, dst, right);
4967 and_(overflow_dst, overflow_dst, scratch);
4968 } else if (dst.is(right)) {
4969 mov(scratch, right); // Preserve right.
4970 addu(dst, left, right); // Right is overwritten.
4971 xor_(scratch, dst, scratch); // Original right.
4972 xor_(overflow_dst, dst, left);
4973 and_(overflow_dst, overflow_dst, scratch);
4974 } else {
4975 addu(dst, left, right);
4976 xor_(overflow_dst, dst, left);
4977 xor_(scratch, dst, right);
4978 and_(overflow_dst, scratch, overflow_dst);
4979 }
4980}
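// The add-with-overflow helpers in this group (including the 64-bit and
// branching variants below) use the two's-complement identity that a signed
// addition overflows exactly when the result's sign differs from both
// operands' signs, i.e. the sign bit of (dst ^ left) & (dst ^ right) is set.
// Minimal sketch (illustrative only):
//   int32_t dst = (int32_t)((uint32_t)left + (uint32_t)right);  // wrapping add
//   bool overflow = ((dst ^ left) & (dst ^ right)) < 0;         // sign-bit test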
4981
4982
4983void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
4984 const Operand& right,
4985 Register overflow_dst,
4986 Register scratch) {
4987 if (right.is_reg()) {
4988 DadduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4989 } else {
4990 if (dst.is(left)) {
4991 li(t9, right); // Load right.
4992 mov(scratch, left); // Preserve left.
4993 daddu(dst, left, t9); // Left is overwritten.
4994 xor_(scratch, dst, scratch); // Original left.
4995 xor_(overflow_dst, dst, t9);
4996 and_(overflow_dst, overflow_dst, scratch);
4997 } else {
4998 li(t9, right); // Load right.
4999 Daddu(dst, left, t9);
5000 xor_(overflow_dst, dst, left);
5001 xor_(scratch, dst, t9);
5002 and_(overflow_dst, scratch, overflow_dst);
5003 }
5004 }
5005}
5006
5007
5008void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
5009 Register right,
5010 Register overflow_dst,
5011 Register scratch) {
5012 DCHECK(!dst.is(overflow_dst));
5013 DCHECK(!dst.is(scratch));
5014 DCHECK(!overflow_dst.is(scratch));
5015 DCHECK(!overflow_dst.is(left));
5016 DCHECK(!overflow_dst.is(right));
5017
5018 if (left.is(right) && dst.is(left)) {
5019 DCHECK(!dst.is(t9));
5020 DCHECK(!scratch.is(t9));
5021 DCHECK(!left.is(t9));
5022 DCHECK(!right.is(t9));
5023 DCHECK(!overflow_dst.is(t9));
5024 mov(t9, right);
5025 right = t9;
5026 }
5027
5028 if (dst.is(left)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005029 mov(scratch, left); // Preserve left.
5030 daddu(dst, left, right); // Left is overwritten.
5031 xor_(scratch, dst, scratch); // Original left.
5032 xor_(overflow_dst, dst, right);
5033 and_(overflow_dst, overflow_dst, scratch);
5034 } else if (dst.is(right)) {
5035 mov(scratch, right); // Preserve right.
5036 daddu(dst, left, right); // Right is overwritten.
5037 xor_(scratch, dst, scratch); // Original right.
5038 xor_(overflow_dst, dst, left);
5039 and_(overflow_dst, overflow_dst, scratch);
5040 } else {
5041 daddu(dst, left, right);
5042 xor_(overflow_dst, dst, left);
5043 xor_(scratch, dst, right);
5044 and_(overflow_dst, scratch, overflow_dst);
5045 }
5046}
5047
5048
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005049static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5050 Label* overflow_label,
5051 Label* no_overflow_label) {
5052 DCHECK(overflow_label || no_overflow_label);
5053 if (!overflow_label) {
5054 DCHECK(no_overflow_label);
5055 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5056 } else {
5057 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5058 if (no_overflow_label) masm->Branch(no_overflow_label);
5059 }
5060}
5061
5062
5063void MacroAssembler::DaddBranchOvf(Register dst, Register left,
5064 const Operand& right, Label* overflow_label,
5065 Label* no_overflow_label, Register scratch) {
5066 if (right.is_reg()) {
5067 DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5068 scratch);
5069 } else {
5070 Register overflow_dst = t9;
5071 DCHECK(!dst.is(scratch));
5072 DCHECK(!dst.is(overflow_dst));
5073 DCHECK(!scratch.is(overflow_dst));
5074 DCHECK(!left.is(overflow_dst));
5075 li(overflow_dst, right); // Load right.
5076 if (dst.is(left)) {
5077 mov(scratch, left); // Preserve left.
5078 Daddu(dst, left, overflow_dst); // Left is overwritten.
5079 xor_(scratch, dst, scratch); // Original left.
5080 xor_(overflow_dst, dst, overflow_dst);
5081 and_(overflow_dst, overflow_dst, scratch);
5082 } else {
5083 Daddu(dst, left, overflow_dst);
5084 xor_(scratch, dst, overflow_dst);
5085 xor_(overflow_dst, dst, left);
5086 and_(overflow_dst, scratch, overflow_dst);
5087 }
5088 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5089 }
5090}
5091
5092
5093void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
5094 Label* overflow_label,
5095 Label* no_overflow_label, Register scratch) {
5096 Register overflow_dst = t9;
5097 DCHECK(!dst.is(scratch));
5098 DCHECK(!dst.is(overflow_dst));
5099 DCHECK(!scratch.is(overflow_dst));
5100 DCHECK(!left.is(overflow_dst));
5101 DCHECK(!right.is(overflow_dst));
5102 DCHECK(!left.is(scratch));
5103 DCHECK(!right.is(scratch));
5104
5105 if (left.is(right) && dst.is(left)) {
5106 mov(overflow_dst, right);
5107 right = overflow_dst;
5108 }
5109
5110 if (dst.is(left)) {
5111 mov(scratch, left); // Preserve left.
5112 daddu(dst, left, right); // Left is overwritten.
5113 xor_(scratch, dst, scratch); // Original left.
5114 xor_(overflow_dst, dst, right);
5115 and_(overflow_dst, overflow_dst, scratch);
5116 } else if (dst.is(right)) {
5117 mov(scratch, right); // Preserve right.
5118 daddu(dst, left, right); // Right is overwritten.
5119 xor_(scratch, dst, scratch); // Original right.
5120 xor_(overflow_dst, dst, left);
5121 and_(overflow_dst, overflow_dst, scratch);
5122 } else {
5123 daddu(dst, left, right);
5124 xor_(overflow_dst, dst, left);
5125 xor_(scratch, dst, right);
5126 and_(overflow_dst, scratch, overflow_dst);
5127 }
5128 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5129}
5130
5131
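// For signed subtraction dst = left - right, overflow occurs only when the
// operands have different signs and the result's sign differs from left, so
// (left ^ right) & (dst ^ left) has its sign bit set exactly on overflow.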
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005132void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
5133 const Operand& right,
5134 Register overflow_dst,
5135 Register scratch) {
5136 if (right.is_reg()) {
5137 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
5138 } else {
5139 if (dst.is(left)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005140 li(t9, right); // Load right.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005141 mov(scratch, left); // Preserve left.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005142 Subu(dst, left, t9); // Left is overwritten.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005143 xor_(overflow_dst, dst, scratch); // scratch is original left.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005144 xor_(scratch, scratch, t9); // scratch is original left.
5145 and_(overflow_dst, scratch, overflow_dst);
5146 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005147 li(t9, right);
5148 subu(dst, left, t9);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005149 xor_(overflow_dst, dst, left);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005150 xor_(scratch, left, t9);
5151 and_(overflow_dst, scratch, overflow_dst);
5152 }
5153 }
5154}
5155
5156
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005157void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005158 Register right,
5159 Register overflow_dst,
5160 Register scratch) {
5161 DCHECK(!dst.is(overflow_dst));
5162 DCHECK(!dst.is(scratch));
5163 DCHECK(!overflow_dst.is(scratch));
5164 DCHECK(!overflow_dst.is(left));
5165 DCHECK(!overflow_dst.is(right));
5166 DCHECK(!scratch.is(left));
5167 DCHECK(!scratch.is(right));
5168
5169 // This happens with some crankshaft code. Since Subu works fine if
5170 // left == right, let's not make that restriction here.
5171 if (left.is(right)) {
5172 mov(dst, zero_reg);
5173 mov(overflow_dst, zero_reg);
5174 return;
5175 }
5176
5177 if (dst.is(left)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005178 mov(scratch, left); // Preserve left.
5179 subu(dst, left, right); // Left is overwritten.
5180 xor_(overflow_dst, dst, scratch); // scratch is original left.
5181 xor_(scratch, scratch, right); // scratch is original left.
5182 and_(overflow_dst, scratch, overflow_dst);
5183 } else if (dst.is(right)) {
5184 mov(scratch, right); // Preserve right.
5185 subu(dst, left, right); // Right is overwritten.
5186 xor_(overflow_dst, dst, left);
5187 xor_(scratch, left, scratch); // Original right.
5188 and_(overflow_dst, scratch, overflow_dst);
5189 } else {
5190 subu(dst, left, right);
5191 xor_(overflow_dst, dst, left);
5192 xor_(scratch, left, right);
5193 and_(overflow_dst, scratch, overflow_dst);
5194 }
5195}
5196
5197
5198void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
5199 const Operand& right,
5200 Register overflow_dst,
5201 Register scratch) {
5202 if (right.is_reg()) {
5203 DsubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
5204 } else {
5205 if (dst.is(left)) {
5206 li(t9, right); // Load right.
5207 mov(scratch, left); // Preserve left.
5208 dsubu(dst, left, t9); // Left is overwritten.
5209 xor_(overflow_dst, dst, scratch); // scratch is original left.
5210 xor_(scratch, scratch, t9); // scratch is original left.
5211 and_(overflow_dst, scratch, overflow_dst);
5212 } else {
5213 li(t9, right);
5214 dsubu(dst, left, t9);
5215 xor_(overflow_dst, dst, left);
5216 xor_(scratch, left, t9);
5217 and_(overflow_dst, scratch, overflow_dst);
5218 }
5219 }
5220}
5221
5222
5223void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
5224 Register right,
5225 Register overflow_dst,
5226 Register scratch) {
5227 DCHECK(!dst.is(overflow_dst));
5228 DCHECK(!dst.is(scratch));
5229 DCHECK(!overflow_dst.is(scratch));
5230 DCHECK(!overflow_dst.is(left));
5231 DCHECK(!overflow_dst.is(right));
5232 DCHECK(!scratch.is(left));
5233 DCHECK(!scratch.is(right));
5234
5235 // This happens with some crankshaft code. Since Subu works fine if
5236 // left == right, let's not make that restriction here.
5237 if (left.is(right)) {
5238 mov(dst, zero_reg);
5239 mov(overflow_dst, zero_reg);
5240 return;
5241 }
5242
5243 if (dst.is(left)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005244 mov(scratch, left); // Preserve left.
5245 dsubu(dst, left, right); // Left is overwritten.
5246 xor_(overflow_dst, dst, scratch); // scratch is original left.
5247 xor_(scratch, scratch, right); // scratch is original left.
5248 and_(overflow_dst, scratch, overflow_dst);
5249 } else if (dst.is(right)) {
5250 mov(scratch, right); // Preserve right.
5251 dsubu(dst, left, right); // Right is overwritten.
5252 xor_(overflow_dst, dst, left);
5253 xor_(scratch, left, scratch); // Original right.
5254 and_(overflow_dst, scratch, overflow_dst);
5255 } else {
5256 dsubu(dst, left, right);
5257 xor_(overflow_dst, dst, left);
5258 xor_(scratch, left, right);
5259 and_(overflow_dst, scratch, overflow_dst);
5260 }
5261}
5262
5263
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005264void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5265 const Operand& right, Label* overflow_label,
5266 Label* no_overflow_label, Register scratch) {
5267 DCHECK(overflow_label || no_overflow_label);
5268 if (right.is_reg()) {
5269 DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5270 scratch);
5271 } else {
5272 Register overflow_dst = t9;
5273 DCHECK(!dst.is(scratch));
5274 DCHECK(!dst.is(overflow_dst));
5275 DCHECK(!scratch.is(overflow_dst));
5276 DCHECK(!left.is(overflow_dst));
5277 DCHECK(!left.is(scratch));
5278 li(overflow_dst, right); // Load right.
5279 if (dst.is(left)) {
5280 mov(scratch, left); // Preserve left.
5281 Dsubu(dst, left, overflow_dst); // Left is overwritten.
5282 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5283 xor_(scratch, dst, scratch); // scratch is original left.
5284 and_(overflow_dst, scratch, overflow_dst);
5285 } else {
5286 Dsubu(dst, left, overflow_dst);
5287 xor_(scratch, left, overflow_dst);
5288 xor_(overflow_dst, dst, left);
5289 and_(overflow_dst, scratch, overflow_dst);
5290 }
5291 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5292 }
5293}
5294
5295
5296void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5297 Label* overflow_label,
5298 Label* no_overflow_label, Register scratch) {
5299 DCHECK(overflow_label || no_overflow_label);
5300 Register overflow_dst = t9;
5301 DCHECK(!dst.is(scratch));
5302 DCHECK(!dst.is(overflow_dst));
5303 DCHECK(!scratch.is(overflow_dst));
5304 DCHECK(!overflow_dst.is(left));
5305 DCHECK(!overflow_dst.is(right));
5306 DCHECK(!scratch.is(left));
5307 DCHECK(!scratch.is(right));
5308
5309 // This happens with some crankshaft code. Since Subu works fine if
5310 // left == right, let's not make that restriction here.
5311 if (left.is(right)) {
5312 mov(dst, zero_reg);
5313 if (no_overflow_label) {
5314 Branch(no_overflow_label);
5315 }
5316 }
5317
5318 if (dst.is(left)) {
5319 mov(scratch, left); // Preserve left.
5320 dsubu(dst, left, right); // Left is overwritten.
5321 xor_(overflow_dst, dst, scratch); // scratch is original left.
5322 xor_(scratch, scratch, right); // scratch is original left.
5323 and_(overflow_dst, scratch, overflow_dst);
5324 } else if (dst.is(right)) {
5325 mov(scratch, right); // Preserve right.
5326 dsubu(dst, left, right); // Right is overwritten.
5327 xor_(overflow_dst, dst, left);
5328 xor_(scratch, left, scratch); // Original right.
5329 and_(overflow_dst, scratch, overflow_dst);
5330 } else {
5331 dsubu(dst, left, right);
5332 xor_(overflow_dst, dst, left);
5333 xor_(scratch, left, right);
5334 and_(overflow_dst, scratch, overflow_dst);
5335 }
5336 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5337}
5338
5339
5340void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5341 SaveFPRegsMode save_doubles,
5342 BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005343 // All parameters are on the stack. v0 has the return value after call.
5344
5345 // If the expected number of arguments of the runtime function is
5346 // constant, we check that the actual number of arguments match the
5347 // expectation.
5348 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5349
5350 // TODO(1236192): Most runtime routines don't need the number of
5351 // arguments passed in because it is constant. At some point we
5352 // should remove this need and make the runtime routine entry code
5353 // smarter.
5354 PrepareCEntryArgs(num_arguments);
5355 PrepareCEntryFunction(ExternalReference(f, isolate()));
5356 CEntryStub stub(isolate(), 1, save_doubles);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005357 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005358}
5359
5360
5361void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5362 int num_arguments,
5363 BranchDelaySlot bd) {
5364 PrepareCEntryArgs(num_arguments);
5365 PrepareCEntryFunction(ext);
5366
5367 CEntryStub stub(isolate(), 1);
5368 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5369}
5370
5371
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005372void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5373 const Runtime::Function* function = Runtime::FunctionForId(fid);
5374 DCHECK_EQ(1, function->result_size);
5375 if (function->nargs >= 0) {
5376 PrepareCEntryArgs(function->nargs);
5377 }
5378 JumpToExternalReference(ExternalReference(fid, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005379}
5380
5381
5382void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5383 BranchDelaySlot bd) {
5384 PrepareCEntryFunction(builtin);
5385 CEntryStub stub(isolate(), 1);
5386 Jump(stub.GetCode(),
5387 RelocInfo::CODE_TARGET,
5388 al,
5389 zero_reg,
5390 Operand(zero_reg),
5391 bd);
5392}
5393
5394
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005395void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5396 Register scratch1, Register scratch2) {
5397 if (FLAG_native_code_counters && counter->Enabled()) {
5398 li(scratch1, Operand(value));
5399 li(scratch2, Operand(ExternalReference(counter)));
5400 sd(scratch1, MemOperand(scratch2));
5401 }
5402}
5403
5404
5405void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5406 Register scratch1, Register scratch2) {
5407 DCHECK(value > 0);
5408 if (FLAG_native_code_counters && counter->Enabled()) {
5409 li(scratch2, Operand(ExternalReference(counter)));
5410 ld(scratch1, MemOperand(scratch2));
5411 Daddu(scratch1, scratch1, Operand(value));
5412 sd(scratch1, MemOperand(scratch2));
5413 }
5414}
5415
5416
5417void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5418 Register scratch1, Register scratch2) {
5419 DCHECK(value > 0);
5420 if (FLAG_native_code_counters && counter->Enabled()) {
5421 li(scratch2, Operand(ExternalReference(counter)));
5422 ld(scratch1, MemOperand(scratch2));
5423 Dsubu(scratch1, scratch1, Operand(value));
5424 sd(scratch1, MemOperand(scratch2));
5425 }
5426}
5427
5428
5429// -----------------------------------------------------------------------------
5430// Debugging.
5431
5432void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5433 Register rs, Operand rt) {
5434 if (emit_debug_code())
5435 Check(cc, reason, rs, rt);
5436}
5437
5438
5439void MacroAssembler::AssertFastElements(Register elements) {
5440 if (emit_debug_code()) {
5441 DCHECK(!elements.is(at));
5442 Label ok;
5443 push(elements);
5444 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5445 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5446 Branch(&ok, eq, elements, Operand(at));
5447 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5448 Branch(&ok, eq, elements, Operand(at));
5449 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5450 Branch(&ok, eq, elements, Operand(at));
5451 Abort(kJSObjectWithFastElementsMapHasSlowElements);
5452 bind(&ok);
5453 pop(elements);
5454 }
5455}
5456
5457
5458void MacroAssembler::Check(Condition cc, BailoutReason reason,
5459 Register rs, Operand rt) {
5460 Label L;
5461 Branch(&L, cc, rs, rt);
5462 Abort(reason);
5463 // Will not return here.
5464 bind(&L);
5465}
5466
5467
5468void MacroAssembler::Abort(BailoutReason reason) {
5469 Label abort_start;
5470 bind(&abort_start);
5471#ifdef DEBUG
5472 const char* msg = GetBailoutReason(reason);
5473 if (msg != NULL) {
5474 RecordComment("Abort message: ");
5475 RecordComment(msg);
5476 }
5477
5478 if (FLAG_trap_on_abort) {
5479 stop(msg);
5480 return;
5481 }
5482#endif
5483
5484 li(a0, Operand(Smi::FromInt(reason)));
5485 push(a0);
5486 // Disable stub call restrictions to always allow calls to abort.
5487 if (!has_frame_) {
5488 // We don't actually want to generate a pile of code for this, so just
5489 // claim there is a stack frame, without generating one.
5490 FrameScope scope(this, StackFrame::NONE);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005491 CallRuntime(Runtime::kAbort);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005492 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005493 CallRuntime(Runtime::kAbort);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005494 }
5495 // Will not return here.
5496 if (is_trampoline_pool_blocked()) {
5497 // If the calling code cares about the exact number of
5498 // instructions generated, we insert padding here to keep the size
5499 // of the Abort macro constant.
5500 // Currently in debug mode with debug_code enabled the number of
5501 // generated instructions is 10, so we use this as a maximum value.
5502 static const int kExpectedAbortInstructions = 10;
5503 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5504 DCHECK(abort_instructions <= kExpectedAbortInstructions);
5505 while (abort_instructions++ < kExpectedAbortInstructions) {
5506 nop();
5507 }
5508 }
5509}
5510
5511
5512void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5513 if (context_chain_length > 0) {
5514 // Move up the chain of contexts to the context containing the slot.
5515 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5516 for (int i = 1; i < context_chain_length; i++) {
5517 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5518 }
5519 } else {
5520 // Slot is in the current function context. Move it into the
5521 // destination register in case we store into it (the write barrier
5522    // cannot be allowed to destroy the context in cp).
5523 Move(dst, cp);
5524 }
5525}
5526
5527
5528void MacroAssembler::LoadTransitionedArrayMapConditional(
5529 ElementsKind expected_kind,
5530 ElementsKind transitioned_kind,
5531 Register map_in_out,
5532 Register scratch,
5533 Label* no_map_match) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005534 DCHECK(IsFastElementsKind(expected_kind));
5535 DCHECK(IsFastElementsKind(transitioned_kind));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005536
5537 // Check that the function's map is the same as the expected cached map.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005538 ld(scratch, NativeContextMemOperand());
5539 ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005540 Branch(no_map_match, ne, map_in_out, Operand(at));
5541
5542 // Use the transitioned cached map.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005543 ld(map_in_out,
5544 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005545}
5546
5547
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005548void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5549 ld(dst, NativeContextMemOperand());
5550 ld(dst, ContextMemOperand(dst, index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005551}
5552
5553
5554void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5555 Register map,
5556 Register scratch) {
5557 // Load the initial map. The global functions all have initial maps.
5558 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5559 if (emit_debug_code()) {
5560 Label ok, fail;
5561 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5562 Branch(&ok);
5563 bind(&fail);
5564 Abort(kGlobalFunctionsMustHaveInitialMap);
5565 bind(&ok);
5566 }
5567}
5568
5569
5570void MacroAssembler::StubPrologue() {
5571 Push(ra, fp, cp);
5572 Push(Smi::FromInt(StackFrame::STUB));
5573 // Adjust FP to point to saved FP.
5574 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
5575}
5576
5577
5578void MacroAssembler::Prologue(bool code_pre_aging) {
5579 PredictableCodeSizeScope predictible_code_size_scope(
5580 this, kNoCodeAgeSequenceLength);
5581 // The following three instructions must remain together and unmodified
5582 // for code aging to work properly.
5583 if (code_pre_aging) {
5584 // Pre-age the code.
5585 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
5586 nop(Assembler::CODE_AGE_MARKER_NOP);
5587 // Load the stub address to t9 and call it,
5588 // GetCodeAgeAndParity() extracts the stub address from this instruction.
5589 li(t9,
5590 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
5591 ADDRESS_LOAD);
5592 nop(); // Prevent jalr to jal optimization.
5593 jalr(t9, a0);
5594 nop(); // Branch delay slot nop.
5595 nop(); // Pad the empty space.
5596 } else {
5597 Push(ra, fp, cp, a1);
5598 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5599 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5600 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5601 // Adjust fp to point to caller's fp.
5602 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
5603 }
5604}
5605
5606
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005607void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
5608 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5609 ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
5610 ld(vector,
5611 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
5612}
5613
5614
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005615void MacroAssembler::EnterFrame(StackFrame::Type type,
5616 bool load_constant_pool_pointer_reg) {
5617 // Out-of-line constant pool not implemented on mips64.
5618 UNREACHABLE();
5619}
5620
5621
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005622void MacroAssembler::EnterFrame(StackFrame::Type type) {
5623 daddiu(sp, sp, -5 * kPointerSize);
5624 li(t8, Operand(Smi::FromInt(type)));
5625 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
5626 sd(ra, MemOperand(sp, 4 * kPointerSize));
5627 sd(fp, MemOperand(sp, 3 * kPointerSize));
5628 sd(cp, MemOperand(sp, 2 * kPointerSize));
5629 sd(t8, MemOperand(sp, 1 * kPointerSize));
5630 sd(t9, MemOperand(sp, 0 * kPointerSize));
5631 // Adjust FP to point to saved FP.
5632 Daddu(fp, sp,
5633 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
5634}
5635
5636
5637void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5638 mov(sp, fp);
5639 ld(fp, MemOperand(sp, 0 * kPointerSize));
5640 ld(ra, MemOperand(sp, 1 * kPointerSize));
5641 daddiu(sp, sp, 2 * kPointerSize);
5642}
5643
5644
5645void MacroAssembler::EnterExitFrame(bool save_doubles,
5646 int stack_space) {
5647 // Set up the frame structure on the stack.
5648 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5649 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5650 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5651
5652 // This is how the stack will look:
5653 // fp + 2 (==kCallerSPDisplacement) - old stack's end
5654 // [fp + 1 (==kCallerPCOffset)] - saved old ra
5655 // [fp + 0 (==kCallerFPOffset)] - saved old fp
5656 // [fp - 1 (==kSPOffset)] - sp of the called function
5657 // [fp - 2 (==kCodeOffset)] - CodeObject
5658 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5659 // new stack (will contain saved ra)
5660
5661 // Save registers.
5662 daddiu(sp, sp, -4 * kPointerSize);
5663 sd(ra, MemOperand(sp, 3 * kPointerSize));
5664 sd(fp, MemOperand(sp, 2 * kPointerSize));
5665 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
5666
5667 if (emit_debug_code()) {
5668 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5669 }
5670
5671 // Accessed from ExitFrame::code_slot.
5672 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5673 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5674
5675 // Save the frame pointer and the context in top.
5676 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5677 sd(fp, MemOperand(t8));
5678 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5679 sd(cp, MemOperand(t8));
5680
5681 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5682 if (save_doubles) {
5683    // The stack is already aligned to 8 bytes, as required for stores with sdc1.
5684 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005685 int space = kNumOfSavedRegisters * kDoubleSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005686 Dsubu(sp, sp, Operand(space));
5687 // Remember: we only need to save every 2nd double FPU value.
5688 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5689 FPURegister reg = FPURegister::from_code(2 * i);
5690 sdc1(reg, MemOperand(sp, i * kDoubleSize));
5691 }
5692 }
5693
5694  // Reserve space for the return address, stack space and an optional slot
5695  // (used by the DirectCEntryStub to hold the return value if a struct is
5696  // returned) and align the frame in preparation for calling the runtime function.
5697 DCHECK(stack_space >= 0);
5698 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5699 if (frame_alignment > 0) {
5700 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5701 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5702 }
5703
5704 // Set the exit frame sp value to point just before the return address
5705 // location.
5706 daddiu(at, sp, kPointerSize);
5707 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5708}
5709
5710
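// Tears down the frame built by EnterExitFrame: restores the saved FPU
// registers if requested, clears the C entry FP slot, restores cp, fp and ra,
// optionally drops argument_count arguments (a byte count when
// argument_count_is_length) and returns when do_return is set.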
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005711void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5712 bool restore_context, bool do_return,
5713 bool argument_count_is_length) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005714 // Optionally restore all double registers.
5715 if (save_doubles) {
5716 // Remember: we only need to restore every 2nd double FPU value.
5717 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5718 Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
5719 kNumOfSavedRegisters * kDoubleSize));
5720 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5721 FPURegister reg = FPURegister::from_code(2 * i);
5722 ldc1(reg, MemOperand(t8, i * kDoubleSize));
5723 }
5724 }
5725
5726 // Clear top frame.
5727 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5728 sd(zero_reg, MemOperand(t8));
5729
5730 // Restore current context from top and clear it in debug mode.
5731 if (restore_context) {
5732 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5733 ld(cp, MemOperand(t8));
5734 }
5735#ifdef DEBUG
5736 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5737 sd(a3, MemOperand(t8));
5738#endif
5739
5740 // Pop the arguments, restore registers, and return.
5741 mov(sp, fp); // Respect ABI stack constraint.
5742 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5743 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5744
5745 if (argument_count.is_valid()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005746 if (argument_count_is_length) {
5747 daddu(sp, sp, argument_count);
5748 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005749 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005750 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005751 }
5752
5753 if (do_return) {
5754 Ret(USE_DELAY_SLOT);
5755 // If returning, the instruction in the delay slot will be the addiu below.
5756 }
5757 daddiu(sp, sp, 2 * kPointerSize);
5758}
5759
5760
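// Fills in the header of a freshly allocated string: the smi-tagged length,
// an empty hash field and the map given by map_index.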
5761void MacroAssembler::InitializeNewString(Register string,
5762 Register length,
5763 Heap::RootListIndex map_index,
5764 Register scratch1,
5765 Register scratch2) {
5766 // dsll(scratch1, length, kSmiTagSize);
5767 dsll32(scratch1, length, 0);
5768 LoadRoot(scratch2, map_index);
5769 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
5770 li(scratch1, Operand(String::kEmptyHashField));
5771 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005772 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005773}
5774
5775
5776int MacroAssembler::ActivationFrameAlignment() {
5777#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5778 // Running on the real platform. Use the alignment as mandated by the local
5779 // environment.
5780 // Note: This will break if we ever start generating snapshots on one Mips
5781 // platform for another Mips platform with a different alignment.
5782 return base::OS::ActivationFrameAlignment();
5783#else // V8_HOST_ARCH_MIPS
5784 // If we are using the simulator then we should always align to the expected
5785 // alignment. As the simulator is used to generate snapshots we do not know
5786 // if the target platform will need alignment, so this is controlled from a
5787 // flag.
5788 return FLAG_sim_stack_alignment;
5789#endif // V8_HOST_ARCH_MIPS
5790}
5791
5792
5793void MacroAssembler::AssertStackIsAligned() {
5794 if (emit_debug_code()) {
5795 const int frame_alignment = ActivationFrameAlignment();
5796 const int frame_alignment_mask = frame_alignment - 1;
5797
5798 if (frame_alignment > kPointerSize) {
5799 Label alignment_as_expected;
5800 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5801 andi(at, sp, frame_alignment_mask);
5802 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5803 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5804 stop("Unexpected stack alignment");
5805 bind(&alignment_as_expected);
5806 }
5807 }
5808}
5809
5810
5811void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5812 Register reg,
5813 Register scratch,
5814 Label* not_power_of_two_or_zero) {
5815 Dsubu(scratch, reg, Operand(1));
5816 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5817 scratch, Operand(zero_reg));
5818 and_(at, scratch, reg); // In the delay slot.
5819 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5820}
5821
5822
5823void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5824 DCHECK(!reg.is(overflow));
5825 mov(overflow, reg); // Save original value.
5826 SmiTag(reg);
5827 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
5828}
5829
5830
5831void MacroAssembler::SmiTagCheckOverflow(Register dst,
5832 Register src,
5833 Register overflow) {
5834 if (dst.is(src)) {
5835 // Fall back to slower case.
5836 SmiTagCheckOverflow(dst, overflow);
5837 } else {
5838 DCHECK(!dst.is(src));
5839 DCHECK(!dst.is(overflow));
5840 DCHECK(!src.is(overflow));
5841 SmiTag(dst, src);
5842 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5843 }
5844}
5845
5846
5847void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5848 if (SmiValuesAre32Bits()) {
5849 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5850 } else {
5851 lw(dst, src);
5852 SmiUntag(dst);
5853 }
5854}
5855
5856
5857void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5858 if (SmiValuesAre32Bits()) {
5859 // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
5860 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5861 dsll(dst, dst, scale);
5862 } else {
5863 lw(dst, src);
5864 DCHECK(scale >= kSmiTagSize);
5865 sll(dst, dst, scale - kSmiTagSize);
5866 }
5867}
5868
5869
5870// Returns 2 values: the Smi and a scaled version of the int within the Smi.
5871void MacroAssembler::SmiLoadWithScale(Register d_smi,
5872 Register d_scaled,
5873 MemOperand src,
5874 int scale) {
5875 if (SmiValuesAre32Bits()) {
5876 ld(d_smi, src);
5877 dsra(d_scaled, d_smi, kSmiShift - scale);
5878 } else {
5879 lw(d_smi, src);
5880 DCHECK(scale >= kSmiTagSize);
5881 sll(d_scaled, d_smi, scale - kSmiTagSize);
5882 }
5883}
5884
5885
5886// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
5887void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5888 Register d_scaled,
5889 MemOperand src,
5890 int scale) {
5891 if (SmiValuesAre32Bits()) {
5892 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5893 dsll(d_scaled, d_int, scale);
5894 } else {
5895 lw(d_int, src);
5896    // Need both the int and the scaled int, so use two instructions.
5897 SmiUntag(d_int);
5898 sll(d_scaled, d_int, scale);
5899 }
5900}
5901
5902
5903void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5904 Register src,
5905 Label* smi_case) {
5906 // DCHECK(!dst.is(src));
5907 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5908 SmiUntag(dst, src);
5909}
5910
5911
5912void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5913 Register src,
5914 Label* non_smi_case) {
5915 // DCHECK(!dst.is(src));
5916 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5917 SmiUntag(dst, src);
5918}
5919
5920void MacroAssembler::JumpIfSmi(Register value,
5921 Label* smi_label,
5922 Register scratch,
5923 BranchDelaySlot bd) {
5924 DCHECK_EQ(0, kSmiTag);
5925 andi(scratch, value, kSmiTagMask);
5926 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5927}
5928
5929void MacroAssembler::JumpIfNotSmi(Register value,
5930 Label* not_smi_label,
5931 Register scratch,
5932 BranchDelaySlot bd) {
5933 DCHECK_EQ(0, kSmiTag);
5934 andi(scratch, value, kSmiTagMask);
5935 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5936}
5937
5938
5939void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5940 Register reg2,
5941 Label* on_not_both_smi) {
5942 STATIC_ASSERT(kSmiTag == 0);
5943  // TODO(plind): Find some better way to fix this assert issue.
5944#if defined(__APPLE__)
5945 DCHECK_EQ(1, kSmiTagMask);
5946#else
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005947 DCHECK_EQ((int64_t)1, kSmiTagMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005948#endif
5949 or_(at, reg1, reg2);
5950 JumpIfNotSmi(at, on_not_both_smi);
5951}
5952
5953
5954void MacroAssembler::JumpIfEitherSmi(Register reg1,
5955 Register reg2,
5956 Label* on_either_smi) {
5957 STATIC_ASSERT(kSmiTag == 0);
5958  // TODO(plind): Find some better way to fix this assert issue.
5959#if defined(__APPLE__)
5960 DCHECK_EQ(1, kSmiTagMask);
5961#else
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005962 DCHECK_EQ((int64_t)1, kSmiTagMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005963#endif
5964 // Both Smi tags must be 1 (not Smi).
5965 and_(at, reg1, reg2);
5966 JumpIfSmi(at, on_either_smi);
5967}
5968
5969
5970void MacroAssembler::AssertNotSmi(Register object) {
5971 if (emit_debug_code()) {
5972 STATIC_ASSERT(kSmiTag == 0);
5973 andi(at, object, kSmiTagMask);
5974 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5975 }
5976}
5977
5978
5979void MacroAssembler::AssertSmi(Register object) {
5980 if (emit_debug_code()) {
5981 STATIC_ASSERT(kSmiTag == 0);
5982 andi(at, object, kSmiTagMask);
5983 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5984 }
5985}
5986
5987
5988void MacroAssembler::AssertString(Register object) {
5989 if (emit_debug_code()) {
5990 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005991 SmiTst(object, t8);
5992 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
5993 GetObjectType(object, t8, t8);
5994 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005995 }
5996}
5997
5998
5999void MacroAssembler::AssertName(Register object) {
6000 if (emit_debug_code()) {
6001 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006002 SmiTst(object, t8);
6003 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6004 GetObjectType(object, t8, t8);
6005 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6006 }
6007}
6008
6009
6010void MacroAssembler::AssertFunction(Register object) {
6011 if (emit_debug_code()) {
6012 STATIC_ASSERT(kSmiTag == 0);
6013 SmiTst(object, t8);
6014 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6015 GetObjectType(object, t8, t8);
6016 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6017 }
6018}
6019
6020
6021void MacroAssembler::AssertBoundFunction(Register object) {
6022 if (emit_debug_code()) {
6023 STATIC_ASSERT(kSmiTag == 0);
6024 SmiTst(object, t8);
6025 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6026 GetObjectType(object, t8, t8);
6027 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006028 }
6029}
6030
6031
Ben Murdoch097c5b22016-05-18 11:27:45 +01006032void MacroAssembler::AssertReceiver(Register object) {
6033 if (emit_debug_code()) {
6034 STATIC_ASSERT(kSmiTag == 0);
6035 SmiTst(object, t8);
6036 Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6037 GetObjectType(object, t8, t8);
6038 Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6039 }
6040}
6041
6042
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006043void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6044 Register scratch) {
6045 if (emit_debug_code()) {
6046 Label done_checking;
6047 AssertNotSmi(object);
6048 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6049 Branch(&done_checking, eq, object, Operand(scratch));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006050 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006051 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006052 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006053 bind(&done_checking);
6054 }
6055}
6056
6057
6058void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6059 if (emit_debug_code()) {
6060 DCHECK(!reg.is(at));
6061 LoadRoot(at, index);
6062 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6063 }
6064}
6065
6066
6067void MacroAssembler::JumpIfNotHeapNumber(Register object,
6068 Register heap_number_map,
6069 Register scratch,
6070 Label* on_not_heap_number) {
6071 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6072 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6073 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6074}
6075
6076
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006077void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6078 Register first, Register second, Register scratch1, Register scratch2,
6079 Label* failure) {
6080 // Test that both first and second are sequential one-byte strings.
6081 // Assume that they are non-smis.
6082 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6083 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6084 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6085 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6086
6087 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6088 scratch2, failure);
6089}
6090
6091
6092void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6093 Register second,
6094 Register scratch1,
6095 Register scratch2,
6096 Label* failure) {
6097 // Check that neither is a smi.
6098 STATIC_ASSERT(kSmiTag == 0);
6099 And(scratch1, first, Operand(second));
6100 JumpIfSmi(scratch1, failure);
6101 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6102 scratch2, failure);
6103}
6104
6105
6106void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6107 Register first, Register second, Register scratch1, Register scratch2,
6108 Label* failure) {
6109 const int kFlatOneByteStringMask =
6110 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6111 const int kFlatOneByteStringTag =
6112 kStringTag | kOneByteStringTag | kSeqStringTag;
6113 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
6114 andi(scratch1, first, kFlatOneByteStringMask);
6115 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6116 andi(scratch2, second, kFlatOneByteStringMask);
6117 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6118}
6119
6120
6121void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
6122 Register scratch,
6123 Label* failure) {
6124 const int kFlatOneByteStringMask =
6125 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6126 const int kFlatOneByteStringTag =
6127 kStringTag | kOneByteStringTag | kSeqStringTag;
6128 And(scratch, type, Operand(kFlatOneByteStringMask));
6129 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
6130}
6131
Ben Murdoch097c5b22016-05-18 11:27:45 +01006132static const int kRegisterPassedArguments = 8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006133
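// Each double argument is counted as two register-sized arguments here;
// anything beyond the register-passed arguments (a0..a7 on N64) goes on the
// stack, in addition to the kCArgSlotCount reserved argument slots.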
6134int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6135 int num_double_arguments) {
6136 int stack_passed_words = 0;
6137 num_reg_arguments += 2 * num_double_arguments;
6138
6139 // O32: Up to four simple arguments are passed in registers a0..a3.
6140 // N64: Up to eight simple arguments are passed in registers a0..a7.
6141 if (num_reg_arguments > kRegisterPassedArguments) {
6142 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6143 }
6144 stack_passed_words += kCArgSlotCount;
6145 return stack_passed_words;
6146}
6147
6148
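// Emits checks (aborting on failure) that |string| is a sequential string
// with the expected encoding and that |index| is a non-negative smi below the
// string length.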
6149void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6150 Register index,
6151 Register value,
6152 Register scratch,
6153 uint32_t encoding_mask) {
6154 Label is_object;
6155 SmiTst(string, at);
6156 Check(ne, kNonObject, at, Operand(zero_reg));
6157
6158 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
6159 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6160
6161 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6162 li(scratch, Operand(encoding_mask));
6163 Check(eq, kUnexpectedStringType, at, Operand(scratch));
6164
6165 // TODO(plind): requires Smi size check code for mips32.
6166
6167 ld(at, FieldMemOperand(string, String::kLengthOffset));
6168 Check(lt, kIndexIsTooLarge, index, Operand(at));
6169
6170 DCHECK(Smi::FromInt(0) == 0);
6171 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6172}
6173
6174
6175void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6176 int num_double_arguments,
6177 Register scratch) {
6178 int frame_alignment = ActivationFrameAlignment();
6179
6180 // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
6181 // O32: Up to four simple arguments are passed in registers a0..a3.
6182 // Those four arguments must have reserved argument slots on the stack for
6183 // mips, even though those argument slots are not normally used.
6184 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6185 // address than) the (O32) argument slots. (arg slot calculation handled by
6186 // CalculateStackPassedWords()).
6187 int stack_passed_arguments = CalculateStackPassedWords(
6188 num_reg_arguments, num_double_arguments);
6189 if (frame_alignment > kPointerSize) {
6190    // Make the stack end at the required alignment and make room for the
6191    // stack-passed arguments and the original value of sp.
6192 mov(scratch, sp);
6193 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6194 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6195 And(sp, sp, Operand(-frame_alignment));
6196 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6197 } else {
6198 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6199 }
6200}
6201
6202
6203void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6204 Register scratch) {
6205 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6206}
6207
6208
6209void MacroAssembler::CallCFunction(ExternalReference function,
6210 int num_reg_arguments,
6211 int num_double_arguments) {
6212 li(t8, Operand(function));
6213 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6214}
6215
6216
6217void MacroAssembler::CallCFunction(Register function,
6218 int num_reg_arguments,
6219 int num_double_arguments) {
6220 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6221}
6222
6223
6224void MacroAssembler::CallCFunction(ExternalReference function,
6225 int num_arguments) {
6226 CallCFunction(function, num_arguments, 0);
6227}
6228
6229
6230void MacroAssembler::CallCFunction(Register function,
6231 int num_arguments) {
6232 CallCFunction(function, num_arguments, 0);
6233}
6234
6235
6236void MacroAssembler::CallCFunctionHelper(Register function,
6237 int num_reg_arguments,
6238 int num_double_arguments) {
6239 DCHECK(has_frame());
6240 // Make sure that the stack is aligned before calling a C function unless
6241 // running in the simulator. The simulator has its own alignment check which
6242 // provides more information.
6243  // The argument slots are presumed to have been set up by
6244 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
6245
6246#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6247 if (emit_debug_code()) {
6248 int frame_alignment = base::OS::ActivationFrameAlignment();
6249 int frame_alignment_mask = frame_alignment - 1;
6250 if (frame_alignment > kPointerSize) {
6251 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6252 Label alignment_as_expected;
6253 And(at, sp, Operand(frame_alignment_mask));
6254 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6255 // Don't use Check here, as it will call Runtime_Abort possibly
6256 // re-entering here.
6257 stop("Unexpected alignment in CallCFunction");
6258 bind(&alignment_as_expected);
6259 }
6260 }
6261#endif // V8_HOST_ARCH_MIPS
6262
6263 // Just call directly. The function called cannot cause a GC, or
6264 // allow preemption, so the return address in the link register
6265 // stays correct.
6266
6267 if (!function.is(t9)) {
6268 mov(t9, function);
6269 function = t9;
6270 }
6271
6272 Call(function);
6273
6274 int stack_passed_arguments = CalculateStackPassedWords(
6275 num_reg_arguments, num_double_arguments);
6276
6277 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6278 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6279 } else {
6280 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6281 }
6282}
6283
6284
6285#undef BRANCH_ARGS_CHECK
6286
6287
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006288void MacroAssembler::CheckPageFlag(
6289 Register object,
6290 Register scratch,
6291 int mask,
6292 Condition cc,
6293 Label* condition_met) {
6294 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6295 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6296 And(scratch, scratch, Operand(mask));
6297 Branch(condition_met, cc, scratch, Operand(zero_reg));
6298}
6299
6300
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006301void MacroAssembler::JumpIfBlack(Register object,
6302 Register scratch0,
6303 Register scratch1,
6304 Label* on_black) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006305 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
6306 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006307}
6308
6309
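// Tests the object's two mark bits against the (first_bit, second_bit)
// pattern and jumps to has_color when they match; see Marking::k*BitPattern.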
6310void MacroAssembler::HasColor(Register object,
6311 Register bitmap_scratch,
6312 Register mask_scratch,
6313 Label* has_color,
6314 int first_bit,
6315 int second_bit) {
6316 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6317 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6318
6319 GetMarkBits(object, bitmap_scratch, mask_scratch);
6320
6321 Label other_color;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006322 // Note that we are using two 4-byte aligned loads.
6323 LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006324 And(t8, t9, Operand(mask_scratch));
6325 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6326 // Shift left 1 by adding.
6327 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
6328 And(t8, t9, Operand(mask_scratch));
6329 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6330
6331 bind(&other_color);
6332}
6333
6334
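// Derives from addr_reg the marking-bitmap cell for the object (bitmap_reg,
// to which callers add MemoryChunk::kHeaderSize when loading) and a one-bit
// mask selecting the object's first mark bit within that cell (mask_reg).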
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006335void MacroAssembler::GetMarkBits(Register addr_reg,
6336 Register bitmap_reg,
6337 Register mask_reg) {
6338 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
6339 // addr_reg is divided into fields:
6340 // |63 page base 20|19 high 8|7 shift 3|2 0|
6341 // 'high' gives the index of the cell holding color bits for the object.
6342 // 'shift' gives the offset in the cell for this object's color.
6343 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6344 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6345 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6346 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
Ben Murdoch097c5b22016-05-18 11:27:45 +01006347 Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006348 li(t8, Operand(1));
6349 dsllv(mask_reg, t8, mask_reg);
6350}
6351
6352
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006353void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6354 Register mask_scratch, Register load_scratch,
6355 Label* value_is_white) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006356 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6357 GetMarkBits(value, bitmap_scratch, mask_scratch);
6358
6359 // If the value is black or grey we don't need to do anything.
6360 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006361 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6362 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006363 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6364
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006365 // Since both black and grey have a 1 in the first position and white does
6366  // not have a 1 there, we only need to check one bit.
6367 // Note that we are using a 4-byte aligned 8-byte load.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006368 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006369 LoadWordPair(load_scratch,
6370 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6371 } else {
6372 lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006373 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006374 And(t8, mask_scratch, load_scratch);
6375 Branch(value_is_white, eq, t8, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006376}
6377
6378
6379void MacroAssembler::LoadInstanceDescriptors(Register map,
6380 Register descriptors) {
6381 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6382}
6383
6384
6385void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006386 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006387 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6388}
6389
6390
6391void MacroAssembler::EnumLength(Register dst, Register map) {
6392 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006393 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006394 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6395 SmiTag(dst);
6396}
6397
6398
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006399void MacroAssembler::LoadAccessor(Register dst, Register holder,
6400 int accessor_index,
6401 AccessorComponent accessor) {
6402 ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6403 LoadInstanceDescriptors(dst, dst);
6404 ld(dst,
6405 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6406 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6407 : AccessorPair::kSetterOffset;
6408 ld(dst, FieldMemOperand(dst, offset));
6409}
6410
6411
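// Walks the prototype chain starting at the object in a0 and jumps to
// call_runtime unless every map's enum cache is usable (valid for the
// receiver, empty for the rest) and no object on the chain has elements.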
Ben Murdoch097c5b22016-05-18 11:27:45 +01006412void MacroAssembler::CheckEnumCache(Label* call_runtime) {
6413 Register null_value = a5;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006414 Register empty_fixed_array_value = a6;
6415 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6416 Label next, start;
6417 mov(a2, a0);
6418
6419 // Check if the enum length field is properly initialized, indicating that
6420 // there is an enum cache.
6421 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6422
6423 EnumLength(a3, a1);
6424 Branch(
6425 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
6426
Ben Murdoch097c5b22016-05-18 11:27:45 +01006427 LoadRoot(null_value, Heap::kNullValueRootIndex);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006428 jmp(&start);
6429
6430 bind(&next);
6431 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6432
6433 // For all objects but the receiver, check that the cache is empty.
6434 EnumLength(a3, a1);
6435 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
6436
6437 bind(&start);
6438
6439 // Check that there are no elements. Register a2 contains the current JS
6440 // object we've reached through the prototype chain.
6441 Label no_elements;
6442 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
6443 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
6444
6445 // Second chance, the object may be using the empty slow element dictionary.
6446 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
6447 Branch(call_runtime, ne, a2, Operand(at));
6448
6449 bind(&no_elements);
6450 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
6451 Branch(&next, ne, a2, Operand(null_value));
6452}
6453
6454
6455void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
6456 DCHECK(!output_reg.is(input_reg));
6457 Label done;
6458 li(output_reg, Operand(255));
6459 // Normal branch: nop in delay slot.
6460 Branch(&done, gt, input_reg, Operand(output_reg));
6461 // Use delay slot in this branch.
6462 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6463 mov(output_reg, zero_reg); // In delay slot.
6464 mov(output_reg, input_reg); // Value is in range 0..255.
6465 bind(&done);
6466}
6467
6468
6469void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6470 DoubleRegister input_reg,
6471 DoubleRegister temp_double_reg) {
6472 Label above_zero;
6473 Label done;
6474 Label in_bounds;
6475
6476 Move(temp_double_reg, 0.0);
6477 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6478
6479  // Double value is not above zero (negative, zero or NaN), return 0.
6480 mov(result_reg, zero_reg);
6481 Branch(&done);
6482
6483 // Double value is >= 255, return 255.
6484 bind(&above_zero);
6485 Move(temp_double_reg, 255.0);
6486 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6487 li(result_reg, Operand(255));
6488 Branch(&done);
6489
6490 // In 0-255 range, round and truncate.
6491 bind(&in_bounds);
6492 cvt_w_d(temp_double_reg, input_reg);
6493 mfc1(result_reg, temp_double_reg);
6494 bind(&done);
6495}
6496
6497
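// Looks for an AllocationMemento directly following the JSArray in new
// space: jumps to no_memento_found when there cannot be one, otherwise
// compares the candidate map and branches to allocation_memento_present
// (if given) when the comparison |cond| holds.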
6498void MacroAssembler::TestJSArrayForAllocationMemento(
6499 Register receiver_reg,
6500 Register scratch_reg,
6501 Label* no_memento_found,
6502 Condition cond,
6503 Label* allocation_memento_present) {
6504 ExternalReference new_space_start =
6505 ExternalReference::new_space_start(isolate());
6506 ExternalReference new_space_allocation_top =
6507 ExternalReference::new_space_allocation_top_address(isolate());
6508 Daddu(scratch_reg, receiver_reg,
6509 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
6510 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
6511 li(at, Operand(new_space_allocation_top));
6512 ld(at, MemOperand(at));
6513 Branch(no_memento_found, gt, scratch_reg, Operand(at));
6514 ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
6515 if (allocation_memento_present) {
6516 Branch(allocation_memento_present, cond, scratch_reg,
6517 Operand(isolate()->factory()->allocation_memento_map()));
6518 }
6519}
6520
6521
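// Returns the first allocatable general register that is not among the (up
// to six) registers passed in; used for picking a safe scratch register.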
6522Register GetRegisterThatIsNotOneOf(Register reg1,
6523 Register reg2,
6524 Register reg3,
6525 Register reg4,
6526 Register reg5,
6527 Register reg6) {
6528 RegList regs = 0;
6529 if (reg1.is_valid()) regs |= reg1.bit();
6530 if (reg2.is_valid()) regs |= reg2.bit();
6531 if (reg3.is_valid()) regs |= reg3.bit();
6532 if (reg4.is_valid()) regs |= reg4.bit();
6533 if (reg5.is_valid()) regs |= reg5.bit();
6534 if (reg6.is_valid()) regs |= reg6.bit();
6535
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006536 const RegisterConfiguration* config =
6537 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
6538 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
6539 int code = config->GetAllocatableGeneralCode(i);
6540 Register candidate = Register::from_code(code);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006541 if (regs & candidate.bit()) continue;
6542 return candidate;
6543 }
6544 UNREACHABLE();
6545 return no_reg;
6546}
6547
6548
6549void MacroAssembler::JumpIfDictionaryInPrototypeChain(
6550 Register object,
6551 Register scratch0,
6552 Register scratch1,
6553 Label* found) {
6554 DCHECK(!scratch1.is(scratch0));
6555 Factory* factory = isolate()->factory();
6556 Register current = scratch0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006557 Label loop_again, end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006558
6559  // Walk the prototype chain using scratch0 ('current'), starting at the object.
6560 Move(current, object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006561 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
6562 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
6563 Branch(&end, eq, current, Operand(factory->null_value()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006564
6565 // Loop based on the map going up the prototype chain.
6566 bind(&loop_again);
6567 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006568 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
6569 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
6570 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
6571 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));

  bind(&end);
}


bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

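  // Any aliasing collapses duplicate bits in |regs|, so the popcount of the
  // RegList is smaller than the number of valid registers passed in.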
  return n_of_valid_regs != n_of_non_aliasing_regs;
}


CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size_ bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }
  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
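  // Note: on mips64 an Address is 64 bits wide while an Instr is only 32
  // bits, so the address cannot be emitted as a single instruction; the body
  // below is presumably disabled on purpose.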
  // masm()->emit(reinterpret_cast<Instr>(addr));
}


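// Replace the opcode bits of |current_instr| with |new_opcode|, keeping the
// register and offset fields, and re-emit the patched instruction in place.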
void CodePatcher::ChangeBranchCondition(Instr current_instr,
                                        uint32_t new_opcode) {
  current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
  masm_.emit(current_instr);
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
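  // Signed division by a constant via the usual magic-number technique
  // (Hacker's Delight): take the high 32 bits of dividend * multiplier,
  // correct when the multiplier's sign differs from the divisor's, apply the
  // arithmetic shift, then add the dividend's sign bit so the quotient rounds
  // toward zero.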
  li(at, Operand(static_cast<int32_t>(mag.multiplier)));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64