blob: b7b4f2882b7b2533b7f1b1afe5032114b271a5ee [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h> // For LONG_MIN, LONG_MAX.
6
Ben Murdochb8a8cc12014-11-26 15:28:44 +00007#if V8_TARGET_ARCH_MIPS64
8
9#include "src/base/division-by-constant.h"
10#include "src/bootstrapper.h"
11#include "src/codegen.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000012#include "src/debug/debug.h"
13#include "src/mips64/macro-assembler-mips64.h"
14#include "src/register-configuration.h"
Emily Bernierd0a1eb72015-03-24 16:35:39 -040015#include "src/runtime/runtime.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000016
17namespace v8 {
18namespace internal {
19
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000020MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
21 CodeObjectRequired create_code_object)
Ben Murdochb8a8cc12014-11-26 15:28:44 +000022 : Assembler(arg_isolate, buffer, size),
23 generating_stub_(false),
Emily Bernierd0a1eb72015-03-24 16:35:39 -040024 has_frame_(false),
25 has_double_zero_reg_set_(false) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000026 if (create_code_object == CodeObjectRequired::kYes) {
27 code_object_ =
28 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +000029 }
30}
31
32
33void MacroAssembler::Load(Register dst,
34 const MemOperand& src,
35 Representation r) {
36 DCHECK(!r.IsDouble());
37 if (r.IsInteger8()) {
38 lb(dst, src);
39 } else if (r.IsUInteger8()) {
40 lbu(dst, src);
41 } else if (r.IsInteger16()) {
42 lh(dst, src);
43 } else if (r.IsUInteger16()) {
44 lhu(dst, src);
45 } else if (r.IsInteger32()) {
46 lw(dst, src);
47 } else {
48 ld(dst, src);
49 }
50}
51
52
53void MacroAssembler::Store(Register src,
54 const MemOperand& dst,
55 Representation r) {
56 DCHECK(!r.IsDouble());
57 if (r.IsInteger8() || r.IsUInteger8()) {
58 sb(src, dst);
59 } else if (r.IsInteger16() || r.IsUInteger16()) {
60 sh(src, dst);
61 } else if (r.IsInteger32()) {
62 sw(src, dst);
63 } else {
64 if (r.IsHeapObject()) {
65 AssertNotSmi(src);
66 } else if (r.IsSmi()) {
67 AssertSmi(src);
68 }
69 sd(src, dst);
70 }
71}
72
73
74void MacroAssembler::LoadRoot(Register destination,
75 Heap::RootListIndex index) {
76 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
77}
78
79
80void MacroAssembler::LoadRoot(Register destination,
81 Heap::RootListIndex index,
82 Condition cond,
83 Register src1, const Operand& src2) {
84 Branch(2, NegateCondition(cond), src1, src2);
85 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
86}
87
88
89void MacroAssembler::StoreRoot(Register source,
90 Heap::RootListIndex index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000091 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +000092 sd(source, MemOperand(s6, index << kPointerSizeLog2));
93}
94
95
96void MacroAssembler::StoreRoot(Register source,
97 Heap::RootListIndex index,
98 Condition cond,
99 Register src1, const Operand& src2) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000100 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000101 Branch(2, NegateCondition(cond), src1, src2);
102 sd(source, MemOperand(s6, index << kPointerSizeLog2));
103}
104
Ben Murdochda12d292016-06-02 14:46:10 +0100105void MacroAssembler::PushCommonFrame(Register marker_reg) {
106 if (marker_reg.is_valid()) {
107 Push(ra, fp, marker_reg);
108 Daddu(fp, sp, Operand(kPointerSize));
109 } else {
110 Push(ra, fp);
111 mov(fp, sp);
112 }
113}
114
115void MacroAssembler::PopCommonFrame(Register marker_reg) {
116 if (marker_reg.is_valid()) {
117 Pop(ra, fp, marker_reg);
118 } else {
119 Pop(ra, fp);
120 }
121}
122
123void MacroAssembler::PushStandardFrame(Register function_reg) {
124 int offset = -StandardFrameConstants::kContextOffset;
125 if (function_reg.is_valid()) {
126 Push(ra, fp, cp, function_reg);
127 offset += kPointerSize;
128 } else {
129 Push(ra, fp, cp);
130 }
131 Daddu(fp, sp, Operand(offset));
132}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000133
134// Push and pop all registers that can hold pointers.
135void MacroAssembler::PushSafepointRegisters() {
136 // Safepoints expect a block of kNumSafepointRegisters values on the
137 // stack, so adjust the stack for unsaved registers.
138 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
139 DCHECK(num_unsaved >= 0);
140 if (num_unsaved > 0) {
141 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
142 }
143 MultiPush(kSafepointSavedRegisters);
144}
145
146
147void MacroAssembler::PopSafepointRegisters() {
148 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
149 MultiPop(kSafepointSavedRegisters);
150 if (num_unsaved > 0) {
151 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
152 }
153}
154
155
156void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
157 sd(src, SafepointRegisterSlot(dst));
158}
159
160
161void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
162 ld(dst, SafepointRegisterSlot(src));
163}
164
165
166int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
167 // The registers are pushed starting with the highest encoding,
168 // which means that lowest encodings are closest to the stack pointer.
169 return kSafepointRegisterStackIndexMap[reg_code];
170}
171
172
173MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
174 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
175}
176
177
178MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
179 UNIMPLEMENTED_MIPS();
180 // General purpose registers are pushed last on the stack.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000181 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000182 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
183 return MemOperand(sp, doubles_size + register_offset);
184}
185
186
187void MacroAssembler::InNewSpace(Register object,
188 Register scratch,
189 Condition cc,
190 Label* branch) {
191 DCHECK(cc == eq || cc == ne);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100192 const int mask =
193 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
194 CheckPageFlag(object, scratch, mask, cc, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000195}
196
197
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000198// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
199// The register 'object' contains a heap object pointer. The heap object
200// tag is shifted away.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000201void MacroAssembler::RecordWriteField(
202 Register object,
203 int offset,
204 Register value,
205 Register dst,
206 RAStatus ra_status,
207 SaveFPRegsMode save_fp,
208 RememberedSetAction remembered_set_action,
209 SmiCheck smi_check,
210 PointersToHereCheck pointers_to_here_check_for_value) {
211 DCHECK(!AreAliased(value, dst, t8, object));
212 // First, check if a write barrier is even needed. The tests below
213 // catch stores of Smis.
214 Label done;
215
216 // Skip barrier if writing a smi.
217 if (smi_check == INLINE_SMI_CHECK) {
218 JumpIfSmi(value, &done);
219 }
220
221 // Although the object register is tagged, the offset is relative to the start
222 // of the object, so so offset must be a multiple of kPointerSize.
223 DCHECK(IsAligned(offset, kPointerSize));
224
225 Daddu(dst, object, Operand(offset - kHeapObjectTag));
226 if (emit_debug_code()) {
227 Label ok;
228 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
229 Branch(&ok, eq, t8, Operand(zero_reg));
230 stop("Unaligned cell in write barrier");
231 bind(&ok);
232 }
233
234 RecordWrite(object,
235 dst,
236 value,
237 ra_status,
238 save_fp,
239 remembered_set_action,
240 OMIT_SMI_CHECK,
241 pointers_to_here_check_for_value);
242
243 bind(&done);
244
245 // Clobber clobbered input registers when running with the debug-code flag
246 // turned on to provoke errors.
247 if (emit_debug_code()) {
248 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
249 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
250 }
251}
252
253
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000254// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000255void MacroAssembler::RecordWriteForMap(Register object,
256 Register map,
257 Register dst,
258 RAStatus ra_status,
259 SaveFPRegsMode fp_mode) {
260 if (emit_debug_code()) {
261 DCHECK(!dst.is(at));
262 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
263 Check(eq,
264 kWrongAddressOrValuePassedToRecordWrite,
265 dst,
266 Operand(isolate()->factory()->meta_map()));
267 }
268
269 if (!FLAG_incremental_marking) {
270 return;
271 }
272
273 if (emit_debug_code()) {
274 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
275 Check(eq,
276 kWrongAddressOrValuePassedToRecordWrite,
277 map,
278 Operand(at));
279 }
280
281 Label done;
282
283 // A single check of the map's pages interesting flag suffices, since it is
284 // only set during incremental collection, and then it's also guaranteed that
285 // the from object's page's interesting flag is also set. This optimization
286 // relies on the fact that maps can never be in new space.
287 CheckPageFlag(map,
288 map, // Used as scratch.
289 MemoryChunk::kPointersToHereAreInterestingMask,
290 eq,
291 &done);
292
293 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
294 if (emit_debug_code()) {
295 Label ok;
296 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
297 Branch(&ok, eq, at, Operand(zero_reg));
298 stop("Unaligned cell in write barrier");
299 bind(&ok);
300 }
301
302 // Record the actual write.
303 if (ra_status == kRAHasNotBeenSaved) {
304 push(ra);
305 }
306 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
307 fp_mode);
308 CallStub(&stub);
309 if (ra_status == kRAHasNotBeenSaved) {
310 pop(ra);
311 }
312
313 bind(&done);
314
315 // Count number of write barriers in generated code.
316 isolate()->counters()->write_barriers_static()->Increment();
317 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
318
319 // Clobber clobbered registers when running with the debug-code flag
320 // turned on to provoke errors.
321 if (emit_debug_code()) {
322 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
323 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
324 }
325}
326
327
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000328// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
329// The register 'object' contains a heap object pointer. The heap object
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000330// tag is shifted away.
331void MacroAssembler::RecordWrite(
332 Register object,
333 Register address,
334 Register value,
335 RAStatus ra_status,
336 SaveFPRegsMode fp_mode,
337 RememberedSetAction remembered_set_action,
338 SmiCheck smi_check,
339 PointersToHereCheck pointers_to_here_check_for_value) {
340 DCHECK(!AreAliased(object, address, value, t8));
341 DCHECK(!AreAliased(object, address, value, t9));
342
343 if (emit_debug_code()) {
344 ld(at, MemOperand(address));
345 Assert(
346 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
347 }
348
349 if (remembered_set_action == OMIT_REMEMBERED_SET &&
350 !FLAG_incremental_marking) {
351 return;
352 }
353
354 // First, check if a write barrier is even needed. The tests below
355 // catch stores of smis and stores into the young generation.
356 Label done;
357
358 if (smi_check == INLINE_SMI_CHECK) {
359 DCHECK_EQ(0, kSmiTag);
360 JumpIfSmi(value, &done);
361 }
362
363 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
364 CheckPageFlag(value,
365 value, // Used as scratch.
366 MemoryChunk::kPointersToHereAreInterestingMask,
367 eq,
368 &done);
369 }
370 CheckPageFlag(object,
371 value, // Used as scratch.
372 MemoryChunk::kPointersFromHereAreInterestingMask,
373 eq,
374 &done);
375
376 // Record the actual write.
377 if (ra_status == kRAHasNotBeenSaved) {
378 push(ra);
379 }
380 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
381 fp_mode);
382 CallStub(&stub);
383 if (ra_status == kRAHasNotBeenSaved) {
384 pop(ra);
385 }
386
387 bind(&done);
388
389 // Count number of write barriers in generated code.
390 isolate()->counters()->write_barriers_static()->Increment();
391 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
392 value);
393
394 // Clobber clobbered registers when running with the debug-code flag
395 // turned on to provoke errors.
396 if (emit_debug_code()) {
397 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
398 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
399 }
400}
401
Ben Murdoch097c5b22016-05-18 11:27:45 +0100402void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
403 Register code_entry,
404 Register scratch) {
405 const int offset = JSFunction::kCodeEntryOffset;
406
407 // Since a code entry (value) is always in old space, we don't need to update
408 // remembered set. If incremental marking is off, there is nothing for us to
409 // do.
410 if (!FLAG_incremental_marking) return;
411
412 DCHECK(js_function.is(a1));
413 DCHECK(code_entry.is(a4));
414 DCHECK(scratch.is(a5));
415 AssertNotSmi(js_function);
416
417 if (emit_debug_code()) {
418 Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
419 ld(at, MemOperand(scratch));
420 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
421 Operand(code_entry));
422 }
423
424 // First, check if a write barrier is even needed. The tests below
425 // catch stores of Smis and stores into young gen.
426 Label done;
427
428 CheckPageFlag(code_entry, scratch,
429 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
430 CheckPageFlag(js_function, scratch,
431 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
432
433 const Register dst = scratch;
434 Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
435
436 // Save caller-saved registers. js_function and code_entry are in the
437 // caller-saved register list.
438 DCHECK(kJSCallerSaved & js_function.bit());
439 DCHECK(kJSCallerSaved & code_entry.bit());
440 MultiPush(kJSCallerSaved | ra.bit());
441
442 int argument_count = 3;
443
444 PrepareCallCFunction(argument_count, code_entry);
445
446 Move(a0, js_function);
447 Move(a1, dst);
448 li(a2, Operand(ExternalReference::isolate_address(isolate())));
449
450 {
451 AllowExternalCallThatCantCauseGC scope(this);
452 CallCFunction(
453 ExternalReference::incremental_marking_record_write_code_entry_function(
454 isolate()),
455 argument_count);
456 }
457
458 // Restore caller-saved registers.
459 MultiPop(kJSCallerSaved | ra.bit());
460
461 bind(&done);
462}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000463
464void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
465 Register address,
466 Register scratch,
467 SaveFPRegsMode fp_mode,
468 RememberedSetFinalAction and_then) {
469 Label done;
470 if (emit_debug_code()) {
471 Label ok;
472 JumpIfNotInNewSpace(object, scratch, &ok);
473 stop("Remembered set pointer is in new space");
474 bind(&ok);
475 }
476 // Load store buffer top.
477 ExternalReference store_buffer =
478 ExternalReference::store_buffer_top(isolate());
479 li(t8, Operand(store_buffer));
480 ld(scratch, MemOperand(t8));
481 // Store pointer to buffer and increment buffer top.
482 sd(address, MemOperand(scratch));
483 Daddu(scratch, scratch, kPointerSize);
484 // Write back new top of buffer.
485 sd(scratch, MemOperand(t8));
486 // Call stub on end of buffer.
487 // Check for end of buffer.
Ben Murdochda12d292016-06-02 14:46:10 +0100488 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000489 DCHECK(!scratch.is(t8));
490 if (and_then == kFallThroughAtEnd) {
Ben Murdochda12d292016-06-02 14:46:10 +0100491 Branch(&done, ne, t8, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000492 } else {
493 DCHECK(and_then == kReturnAtEnd);
Ben Murdochda12d292016-06-02 14:46:10 +0100494 Ret(ne, t8, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000495 }
496 push(ra);
497 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
498 CallStub(&store_buffer_overflow);
499 pop(ra);
500 bind(&done);
501 if (and_then == kReturnAtEnd) {
502 Ret();
503 }
504}
505
506
507// -----------------------------------------------------------------------------
508// Allocation support.
509
510
511void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
512 Register scratch,
513 Label* miss) {
514 Label same_contexts;
Ben Murdochda12d292016-06-02 14:46:10 +0100515 Register temporary = t8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000516
517 DCHECK(!holder_reg.is(scratch));
518 DCHECK(!holder_reg.is(at));
519 DCHECK(!scratch.is(at));
520
Ben Murdochda12d292016-06-02 14:46:10 +0100521 // Load current lexical context from the active StandardFrame, which
522 // may require crawling past STUB frames.
523 Label load_context;
524 Label has_context;
525 mov(at, fp);
526 bind(&load_context);
527 ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
528 // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
529 JumpIfNotSmi(scratch, &has_context, temporary);
530 ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
531 Branch(&load_context);
532 bind(&has_context);
533
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000534 // In debug mode, make sure the lexical context is set.
535#ifdef DEBUG
536 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
537 scratch, Operand(zero_reg));
538#endif
539
540 // Load the native context of the current context.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000541 ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000542
543 // Check the context is a native context.
544 if (emit_debug_code()) {
545 push(holder_reg); // Temporarily save holder on the stack.
546 // Read the first word and compare to the native_context_map.
547 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
548 LoadRoot(at, Heap::kNativeContextMapRootIndex);
549 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
550 holder_reg, Operand(at));
551 pop(holder_reg); // Restore holder.
552 }
553
554 // Check if both contexts are the same.
555 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
556 Branch(&same_contexts, eq, scratch, Operand(at));
557
558 // Check the context is a native context.
559 if (emit_debug_code()) {
560 push(holder_reg); // Temporarily save holder on the stack.
561 mov(holder_reg, at); // Move at to its holding place.
562 LoadRoot(at, Heap::kNullValueRootIndex);
563 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
564 holder_reg, Operand(at));
565
566 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
567 LoadRoot(at, Heap::kNativeContextMapRootIndex);
568 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
569 holder_reg, Operand(at));
570 // Restore at is not needed. at is reloaded below.
571 pop(holder_reg); // Restore holder.
572 // Restore at to holder's context.
573 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
574 }
575
576 // Check that the security token in the calling global object is
577 // compatible with the security token in the receiving global
578 // object.
579 int token_offset = Context::kHeaderSize +
580 Context::SECURITY_TOKEN_INDEX * kPointerSize;
581
582 ld(scratch, FieldMemOperand(scratch, token_offset));
583 ld(at, FieldMemOperand(at, token_offset));
584 Branch(miss, ne, scratch, Operand(at));
585
586 bind(&same_contexts);
587}
588
589
590// Compute the hash code from the untagged key. This must be kept in sync with
591// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
592// code-stub-hydrogen.cc
593void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
594 // First of all we assign the hash seed to scratch.
595 LoadRoot(scratch, Heap::kHashSeedRootIndex);
596 SmiUntag(scratch);
597
598 // Xor original key with a seed.
599 xor_(reg0, reg0, scratch);
600
601 // Compute the hash code from the untagged key. This must be kept in sync
602 // with ComputeIntegerHash in utils.h.
603 //
604 // hash = ~hash + (hash << 15);
605 // The algorithm uses 32-bit integer values.
606 nor(scratch, reg0, zero_reg);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100607 Lsa(reg0, scratch, reg0, 15);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000608
609 // hash = hash ^ (hash >> 12);
610 srl(at, reg0, 12);
611 xor_(reg0, reg0, at);
612
613 // hash = hash + (hash << 2);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100614 Lsa(reg0, reg0, reg0, 2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000615
616 // hash = hash ^ (hash >> 4);
617 srl(at, reg0, 4);
618 xor_(reg0, reg0, at);
619
620 // hash = hash * 2057;
621 sll(scratch, reg0, 11);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100622 Lsa(reg0, reg0, reg0, 3);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000623 addu(reg0, reg0, scratch);
624
625 // hash = hash ^ (hash >> 16);
626 srl(at, reg0, 16);
627 xor_(reg0, reg0, at);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000628 And(reg0, reg0, Operand(0x3fffffff));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000629}
630
631
632void MacroAssembler::LoadFromNumberDictionary(Label* miss,
633 Register elements,
634 Register key,
635 Register result,
636 Register reg0,
637 Register reg1,
638 Register reg2) {
639 // Register use:
640 //
641 // elements - holds the slow-case elements of the receiver on entry.
642 // Unchanged unless 'result' is the same register.
643 //
644 // key - holds the smi key on entry.
645 // Unchanged unless 'result' is the same register.
646 //
647 //
648 // result - holds the result on exit if the load succeeded.
649 // Allowed to be the same as 'key' or 'result'.
650 // Unchanged on bailout so 'key' or 'result' can be used
651 // in further computation.
652 //
653 // Scratch registers:
654 //
655 // reg0 - holds the untagged key on entry and holds the hash once computed.
656 //
657 // reg1 - Used to hold the capacity mask of the dictionary.
658 //
659 // reg2 - Used for the index into the dictionary.
660 // at - Temporary (avoid MacroAssembler instructions also using 'at').
661 Label done;
662
663 GetNumberHash(reg0, reg1);
664
665 // Compute the capacity mask.
666 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
667 SmiUntag(reg1, reg1);
668 Dsubu(reg1, reg1, Operand(1));
669
670 // Generate an unrolled loop that performs a few probes before giving up.
671 for (int i = 0; i < kNumberDictionaryProbes; i++) {
672 // Use reg2 for index calculations and keep the hash intact in reg0.
673 mov(reg2, reg0);
674 // Compute the masked index: (hash + i + i * i) & mask.
675 if (i > 0) {
676 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
677 }
678 and_(reg2, reg2, reg1);
679
680 // Scale the index by multiplying by the element size.
681 DCHECK(SeededNumberDictionary::kEntrySize == 3);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100682 Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000683
684 // Check if the key is identical to the name.
Ben Murdoch097c5b22016-05-18 11:27:45 +0100685 Dlsa(reg2, elements, reg2, kPointerSizeLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000686
687 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
688 if (i != kNumberDictionaryProbes - 1) {
689 Branch(&done, eq, key, Operand(at));
690 } else {
691 Branch(miss, ne, key, Operand(at));
692 }
693 }
694
695 bind(&done);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400696 // Check that the value is a field property.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000697 // reg2: elements + (index * kPointerSize).
698 const int kDetailsOffset =
699 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
700 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000701 DCHECK_EQ(DATA, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000702 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
703 Branch(miss, ne, at, Operand(zero_reg));
704
705 // Get the value at the masked, scaled index and return.
706 const int kValueOffset =
707 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
708 ld(result, FieldMemOperand(reg2, kValueOffset));
709}
710
711
712// ---------------------------------------------------------------------------
713// Instruction macros.
714
715void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
716 if (rt.is_reg()) {
717 addu(rd, rs, rt.rm());
718 } else {
719 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000720 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000721 } else {
722 // li handles the relocation.
723 DCHECK(!rs.is(at));
724 li(at, rt);
725 addu(rd, rs, at);
726 }
727 }
728}
729
730
731void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
732 if (rt.is_reg()) {
733 daddu(rd, rs, rt.rm());
734 } else {
735 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000736 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000737 } else {
738 // li handles the relocation.
739 DCHECK(!rs.is(at));
740 li(at, rt);
741 daddu(rd, rs, at);
742 }
743 }
744}
745
746
747void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
748 if (rt.is_reg()) {
749 subu(rd, rs, rt.rm());
750 } else {
751 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000752 addiu(rd, rs, static_cast<int32_t>(
753 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000754 } else {
755 // li handles the relocation.
756 DCHECK(!rs.is(at));
757 li(at, rt);
758 subu(rd, rs, at);
759 }
760 }
761}
762
763
764void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
765 if (rt.is_reg()) {
766 dsubu(rd, rs, rt.rm());
767 } else {
768 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000769 daddiu(rd, rs,
770 static_cast<int32_t>(
771 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000772 } else {
773 // li handles the relocation.
774 DCHECK(!rs.is(at));
775 li(at, rt);
776 dsubu(rd, rs, at);
777 }
778 }
779}
780
781
782void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
783 if (rt.is_reg()) {
784 mul(rd, rs, rt.rm());
785 } else {
786 // li handles the relocation.
787 DCHECK(!rs.is(at));
788 li(at, rt);
789 mul(rd, rs, at);
790 }
791}
792
793
794void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
795 if (rt.is_reg()) {
796 if (kArchVariant != kMips64r6) {
797 mult(rs, rt.rm());
798 mfhi(rd);
799 } else {
800 muh(rd, rs, rt.rm());
801 }
802 } else {
803 // li handles the relocation.
804 DCHECK(!rs.is(at));
805 li(at, rt);
806 if (kArchVariant != kMips64r6) {
807 mult(rs, at);
808 mfhi(rd);
809 } else {
810 muh(rd, rs, at);
811 }
812 }
813}
814
815
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400816void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
817 if (rt.is_reg()) {
818 if (kArchVariant != kMips64r6) {
819 multu(rs, rt.rm());
820 mfhi(rd);
821 } else {
822 muhu(rd, rs, rt.rm());
823 }
824 } else {
825 // li handles the relocation.
826 DCHECK(!rs.is(at));
827 li(at, rt);
828 if (kArchVariant != kMips64r6) {
829 multu(rs, at);
830 mfhi(rd);
831 } else {
832 muhu(rd, rs, at);
833 }
834 }
835}
836
837
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000838void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
839 if (rt.is_reg()) {
840 if (kArchVariant == kMips64r6) {
841 dmul(rd, rs, rt.rm());
842 } else {
843 dmult(rs, rt.rm());
844 mflo(rd);
845 }
846 } else {
847 // li handles the relocation.
848 DCHECK(!rs.is(at));
849 li(at, rt);
850 if (kArchVariant == kMips64r6) {
851 dmul(rd, rs, at);
852 } else {
853 dmult(rs, at);
854 mflo(rd);
855 }
856 }
857}
858
859
860void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
861 if (rt.is_reg()) {
862 if (kArchVariant == kMips64r6) {
863 dmuh(rd, rs, rt.rm());
864 } else {
865 dmult(rs, rt.rm());
866 mfhi(rd);
867 }
868 } else {
869 // li handles the relocation.
870 DCHECK(!rs.is(at));
871 li(at, rt);
872 if (kArchVariant == kMips64r6) {
873 dmuh(rd, rs, at);
874 } else {
875 dmult(rs, at);
876 mfhi(rd);
877 }
878 }
879}
880
881
882void MacroAssembler::Mult(Register rs, const Operand& rt) {
883 if (rt.is_reg()) {
884 mult(rs, rt.rm());
885 } else {
886 // li handles the relocation.
887 DCHECK(!rs.is(at));
888 li(at, rt);
889 mult(rs, at);
890 }
891}
892
893
894void MacroAssembler::Dmult(Register rs, const Operand& rt) {
895 if (rt.is_reg()) {
896 dmult(rs, rt.rm());
897 } else {
898 // li handles the relocation.
899 DCHECK(!rs.is(at));
900 li(at, rt);
901 dmult(rs, at);
902 }
903}
904
905
906void MacroAssembler::Multu(Register rs, const Operand& rt) {
907 if (rt.is_reg()) {
908 multu(rs, rt.rm());
909 } else {
910 // li handles the relocation.
911 DCHECK(!rs.is(at));
912 li(at, rt);
913 multu(rs, at);
914 }
915}
916
917
918void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
919 if (rt.is_reg()) {
920 dmultu(rs, rt.rm());
921 } else {
922 // li handles the relocation.
923 DCHECK(!rs.is(at));
924 li(at, rt);
925 dmultu(rs, at);
926 }
927}
928
929
930void MacroAssembler::Div(Register rs, const Operand& rt) {
931 if (rt.is_reg()) {
932 div(rs, rt.rm());
933 } else {
934 // li handles the relocation.
935 DCHECK(!rs.is(at));
936 li(at, rt);
937 div(rs, at);
938 }
939}
940
941
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400942void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
943 if (rt.is_reg()) {
944 if (kArchVariant != kMips64r6) {
945 div(rs, rt.rm());
946 mflo(res);
947 } else {
948 div(res, rs, rt.rm());
949 }
950 } else {
951 // li handles the relocation.
952 DCHECK(!rs.is(at));
953 li(at, rt);
954 if (kArchVariant != kMips64r6) {
955 div(rs, at);
956 mflo(res);
957 } else {
958 div(res, rs, at);
959 }
960 }
961}
962
963
964void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
965 if (rt.is_reg()) {
966 if (kArchVariant != kMips64r6) {
967 div(rs, rt.rm());
968 mfhi(rd);
969 } else {
970 mod(rd, rs, rt.rm());
971 }
972 } else {
973 // li handles the relocation.
974 DCHECK(!rs.is(at));
975 li(at, rt);
976 if (kArchVariant != kMips64r6) {
977 div(rs, at);
978 mfhi(rd);
979 } else {
980 mod(rd, rs, at);
981 }
982 }
983}
984
985
986void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
987 if (rt.is_reg()) {
988 if (kArchVariant != kMips64r6) {
989 divu(rs, rt.rm());
990 mfhi(rd);
991 } else {
992 modu(rd, rs, rt.rm());
993 }
994 } else {
995 // li handles the relocation.
996 DCHECK(!rs.is(at));
997 li(at, rt);
998 if (kArchVariant != kMips64r6) {
999 divu(rs, at);
1000 mfhi(rd);
1001 } else {
1002 modu(rd, rs, at);
1003 }
1004 }
1005}
1006
1007
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001008void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
1009 if (rt.is_reg()) {
1010 ddiv(rs, rt.rm());
1011 } else {
1012 // li handles the relocation.
1013 DCHECK(!rs.is(at));
1014 li(at, rt);
1015 ddiv(rs, at);
1016 }
1017}
1018
1019
1020void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
1021 if (kArchVariant != kMips64r6) {
1022 if (rt.is_reg()) {
1023 ddiv(rs, rt.rm());
1024 mflo(rd);
1025 } else {
1026 // li handles the relocation.
1027 DCHECK(!rs.is(at));
1028 li(at, rt);
1029 ddiv(rs, at);
1030 mflo(rd);
1031 }
1032 } else {
1033 if (rt.is_reg()) {
1034 ddiv(rd, rs, rt.rm());
1035 } else {
1036 // li handles the relocation.
1037 DCHECK(!rs.is(at));
1038 li(at, rt);
1039 ddiv(rd, rs, at);
1040 }
1041 }
1042}
1043
1044
1045void MacroAssembler::Divu(Register rs, const Operand& rt) {
1046 if (rt.is_reg()) {
1047 divu(rs, rt.rm());
1048 } else {
1049 // li handles the relocation.
1050 DCHECK(!rs.is(at));
1051 li(at, rt);
1052 divu(rs, at);
1053 }
1054}
1055
1056
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001057void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
1058 if (rt.is_reg()) {
1059 if (kArchVariant != kMips64r6) {
1060 divu(rs, rt.rm());
1061 mflo(res);
1062 } else {
1063 divu(res, rs, rt.rm());
1064 }
1065 } else {
1066 // li handles the relocation.
1067 DCHECK(!rs.is(at));
1068 li(at, rt);
1069 if (kArchVariant != kMips64r6) {
1070 divu(rs, at);
1071 mflo(res);
1072 } else {
1073 divu(res, rs, at);
1074 }
1075 }
1076}
1077
1078
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001079void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
1080 if (rt.is_reg()) {
1081 ddivu(rs, rt.rm());
1082 } else {
1083 // li handles the relocation.
1084 DCHECK(!rs.is(at));
1085 li(at, rt);
1086 ddivu(rs, at);
1087 }
1088}
1089
1090
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001091void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
1092 if (rt.is_reg()) {
1093 if (kArchVariant != kMips64r6) {
1094 ddivu(rs, rt.rm());
1095 mflo(res);
1096 } else {
1097 ddivu(res, rs, rt.rm());
1098 }
1099 } else {
1100 // li handles the relocation.
1101 DCHECK(!rs.is(at));
1102 li(at, rt);
1103 if (kArchVariant != kMips64r6) {
1104 ddivu(rs, at);
1105 mflo(res);
1106 } else {
1107 ddivu(res, rs, at);
1108 }
1109 }
1110}
1111
1112
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001113void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
1114 if (kArchVariant != kMips64r6) {
1115 if (rt.is_reg()) {
1116 ddiv(rs, rt.rm());
1117 mfhi(rd);
1118 } else {
1119 // li handles the relocation.
1120 DCHECK(!rs.is(at));
1121 li(at, rt);
1122 ddiv(rs, at);
1123 mfhi(rd);
1124 }
1125 } else {
1126 if (rt.is_reg()) {
1127 dmod(rd, rs, rt.rm());
1128 } else {
1129 // li handles the relocation.
1130 DCHECK(!rs.is(at));
1131 li(at, rt);
1132 dmod(rd, rs, at);
1133 }
1134 }
1135}
1136
1137
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001138void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
1139 if (kArchVariant != kMips64r6) {
1140 if (rt.is_reg()) {
1141 ddivu(rs, rt.rm());
1142 mfhi(rd);
1143 } else {
1144 // li handles the relocation.
1145 DCHECK(!rs.is(at));
1146 li(at, rt);
1147 ddivu(rs, at);
1148 mfhi(rd);
1149 }
1150 } else {
1151 if (rt.is_reg()) {
1152 dmodu(rd, rs, rt.rm());
1153 } else {
1154 // li handles the relocation.
1155 DCHECK(!rs.is(at));
1156 li(at, rt);
1157 dmodu(rd, rs, at);
1158 }
1159 }
1160}
1161
1162
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001163void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1164 if (rt.is_reg()) {
1165 and_(rd, rs, rt.rm());
1166 } else {
1167 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001168 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001169 } else {
1170 // li handles the relocation.
1171 DCHECK(!rs.is(at));
1172 li(at, rt);
1173 and_(rd, rs, at);
1174 }
1175 }
1176}
1177
1178
1179void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1180 if (rt.is_reg()) {
1181 or_(rd, rs, rt.rm());
1182 } else {
1183 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001184 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001185 } else {
1186 // li handles the relocation.
1187 DCHECK(!rs.is(at));
1188 li(at, rt);
1189 or_(rd, rs, at);
1190 }
1191 }
1192}
1193
1194
1195void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1196 if (rt.is_reg()) {
1197 xor_(rd, rs, rt.rm());
1198 } else {
1199 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001200 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001201 } else {
1202 // li handles the relocation.
1203 DCHECK(!rs.is(at));
1204 li(at, rt);
1205 xor_(rd, rs, at);
1206 }
1207 }
1208}
1209
1210
1211void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1212 if (rt.is_reg()) {
1213 nor(rd, rs, rt.rm());
1214 } else {
1215 // li handles the relocation.
1216 DCHECK(!rs.is(at));
1217 li(at, rt);
1218 nor(rd, rs, at);
1219 }
1220}
1221
1222
1223void MacroAssembler::Neg(Register rs, const Operand& rt) {
1224 DCHECK(rt.is_reg());
1225 DCHECK(!at.is(rs));
1226 DCHECK(!at.is(rt.rm()));
1227 li(at, -1);
1228 xor_(rs, rt.rm(), at);
1229}
1230
1231
1232void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1233 if (rt.is_reg()) {
1234 slt(rd, rs, rt.rm());
1235 } else {
1236 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001237 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001238 } else {
1239 // li handles the relocation.
1240 DCHECK(!rs.is(at));
1241 li(at, rt);
1242 slt(rd, rs, at);
1243 }
1244 }
1245}
1246
1247
1248void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1249 if (rt.is_reg()) {
1250 sltu(rd, rs, rt.rm());
1251 } else {
1252 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001253 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001254 } else {
1255 // li handles the relocation.
1256 DCHECK(!rs.is(at));
1257 li(at, rt);
1258 sltu(rd, rs, at);
1259 }
1260 }
1261}
1262
1263
1264void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001265 if (rt.is_reg()) {
1266 rotrv(rd, rs, rt.rm());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001267 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01001268 int64_t ror_value = rt.imm64_ % 32;
1269 if (ror_value < 0) {
1270 ror_value += 32;
1271 }
1272 rotr(rd, rs, ror_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001273 }
1274}
1275
1276
1277void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1278 if (rt.is_reg()) {
1279 drotrv(rd, rs, rt.rm());
1280 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01001281 int64_t dror_value = rt.imm64_ % 64;
1282 if (dror_value < 0) dror_value += 64;
1283 if (dror_value <= 31) {
1284 drotr(rd, rs, dror_value);
1285 } else {
1286 drotr32(rd, rs, dror_value - 32);
1287 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001288 }
1289}
1290
1291
1292void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1293 pref(hint, rs);
1294}
1295
1296
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001297void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1298 Register scratch) {
Ben Murdochda12d292016-06-02 14:46:10 +01001299 DCHECK(sa >= 1 && sa <= 31);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001300 if (kArchVariant == kMips64r6 && sa <= 4) {
Ben Murdochda12d292016-06-02 14:46:10 +01001301 lsa(rd, rt, rs, sa - 1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001302 } else {
1303 Register tmp = rd.is(rt) ? scratch : rd;
1304 DCHECK(!tmp.is(rt));
1305 sll(tmp, rs, sa);
1306 Addu(rd, rt, tmp);
1307 }
1308}
1309
1310
1311void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1312 Register scratch) {
Ben Murdochda12d292016-06-02 14:46:10 +01001313 DCHECK(sa >= 1 && sa <= 31);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001314 if (kArchVariant == kMips64r6 && sa <= 4) {
Ben Murdochda12d292016-06-02 14:46:10 +01001315 dlsa(rd, rt, rs, sa - 1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001316 } else {
1317 Register tmp = rd.is(rt) ? scratch : rd;
1318 DCHECK(!tmp.is(rt));
1319 dsll(tmp, rs, sa);
1320 Daddu(rd, rt, tmp);
1321 }
1322}
1323
1324
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001325// ------------Pseudo-instructions-------------
1326
1327void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
Ben Murdochc5610432016-08-08 18:44:38 +01001328 DCHECK(!rd.is(at));
1329 DCHECK(!rs.rm().is(at));
1330 if (kArchVariant == kMips64r6) {
1331 lw(rd, rs);
1332 } else {
1333 DCHECK(kArchVariant == kMips64r2);
1334 if (is_int16(rs.offset() + kMipsLwrOffset) &&
1335 is_int16(rs.offset() + kMipsLwlOffset)) {
1336 if (!rd.is(rs.rm())) {
1337 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1338 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1339 } else {
1340 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1341 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1342 mov(rd, at);
1343 }
1344 } else { // Offset > 16 bits, use multiple instructions to load.
1345 LoadRegPlusOffsetToAt(rs);
1346 lwr(rd, MemOperand(at, kMipsLwrOffset));
1347 lwl(rd, MemOperand(at, kMipsLwlOffset));
1348 }
1349 }
1350}
1351
1352void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
1353 if (kArchVariant == kMips64r6) {
1354 lwu(rd, rs);
1355 } else {
1356 DCHECK(kArchVariant == kMips64r2);
1357 Ulw(rd, rs);
1358 Dext(rd, rd, 0, 32);
1359 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001360}
1361
1362
1363void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
Ben Murdochc5610432016-08-08 18:44:38 +01001364 DCHECK(!rd.is(at));
1365 DCHECK(!rs.rm().is(at));
1366 if (kArchVariant == kMips64r6) {
1367 sw(rd, rs);
1368 } else {
1369 DCHECK(kArchVariant == kMips64r2);
1370 if (is_int16(rs.offset() + kMipsSwrOffset) &&
1371 is_int16(rs.offset() + kMipsSwlOffset)) {
1372 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
1373 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
1374 } else {
1375 LoadRegPlusOffsetToAt(rs);
1376 swr(rd, MemOperand(at, kMipsSwrOffset));
1377 swl(rd, MemOperand(at, kMipsSwlOffset));
1378 }
1379 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001380}
1381
Ben Murdochc5610432016-08-08 18:44:38 +01001382void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1383 DCHECK(!rd.is(at));
1384 DCHECK(!rs.rm().is(at));
1385 if (kArchVariant == kMips64r6) {
1386 lh(rd, rs);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001387 } else {
Ben Murdochc5610432016-08-08 18:44:38 +01001388 DCHECK(kArchVariant == kMips64r2);
1389 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1390#if defined(V8_TARGET_LITTLE_ENDIAN)
1391 lbu(at, rs);
1392 lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
1393#elif defined(V8_TARGET_BIG_ENDIAN)
1394 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1395 lb(rd, rs);
1396#endif
1397 } else { // Offset > 16 bits, use multiple instructions to load.
1398 LoadRegPlusOffsetToAt(rs);
1399#if defined(V8_TARGET_LITTLE_ENDIAN)
1400 lb(rd, MemOperand(at, 1));
1401 lbu(at, MemOperand(at, 0));
1402#elif defined(V8_TARGET_BIG_ENDIAN)
1403 lb(rd, MemOperand(at, 0));
1404 lbu(at, MemOperand(at, 1));
1405#endif
1406 }
1407 dsll(rd, rd, 8);
1408 or_(rd, rd, at);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001409 }
Ben Murdochc5610432016-08-08 18:44:38 +01001410}
1411
1412void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1413 DCHECK(!rd.is(at));
1414 DCHECK(!rs.rm().is(at));
1415 if (kArchVariant == kMips64r6) {
1416 lhu(rd, rs);
1417 } else {
1418 DCHECK(kArchVariant == kMips64r2);
1419 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1420#if defined(V8_TARGET_LITTLE_ENDIAN)
1421 lbu(at, rs);
1422 lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
1423#elif defined(V8_TARGET_BIG_ENDIAN)
1424 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1425 lbu(rd, rs);
1426#endif
1427 } else { // Offset > 16 bits, use multiple instructions to load.
1428 LoadRegPlusOffsetToAt(rs);
1429#if defined(V8_TARGET_LITTLE_ENDIAN)
1430 lbu(rd, MemOperand(at, 1));
1431 lbu(at, MemOperand(at, 0));
1432#elif defined(V8_TARGET_BIG_ENDIAN)
1433 lbu(rd, MemOperand(at, 0));
1434 lbu(at, MemOperand(at, 1));
1435#endif
1436 }
1437 dsll(rd, rd, 8);
1438 or_(rd, rd, at);
1439 }
1440}
1441
1442void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1443 DCHECK(!rd.is(at));
1444 DCHECK(!rs.rm().is(at));
1445 DCHECK(!rs.rm().is(scratch));
1446 DCHECK(!scratch.is(at));
1447 if (kArchVariant == kMips64r6) {
1448 sh(rd, rs);
1449 } else {
1450 DCHECK(kArchVariant == kMips64r2);
1451 MemOperand source = rs;
1452 // If offset > 16 bits, load address to at with offset 0.
1453 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
1454 LoadRegPlusOffsetToAt(rs);
1455 source = MemOperand(at, 0);
1456 }
1457
1458 if (!scratch.is(rd)) {
1459 mov(scratch, rd);
1460 }
1461
1462#if defined(V8_TARGET_LITTLE_ENDIAN)
1463 sb(scratch, source);
1464 srl(scratch, scratch, 8);
1465 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1466#elif defined(V8_TARGET_BIG_ENDIAN)
1467 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1468 srl(scratch, scratch, 8);
1469 sb(scratch, source);
1470#endif
1471 }
1472}
1473
1474void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
1475 DCHECK(!rd.is(at));
1476 DCHECK(!rs.rm().is(at));
1477 if (kArchVariant == kMips64r6) {
1478 ld(rd, rs);
1479 } else {
1480 DCHECK(kArchVariant == kMips64r2);
1481 if (is_int16(rs.offset() + kMipsLdrOffset) &&
1482 is_int16(rs.offset() + kMipsLdlOffset)) {
1483 if (!rd.is(rs.rm())) {
1484 ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1485 ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1486 } else {
1487 ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1488 ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1489 mov(rd, at);
1490 }
1491 } else { // Offset > 16 bits, use multiple instructions to load.
1492 LoadRegPlusOffsetToAt(rs);
1493 ldr(rd, MemOperand(at, kMipsLdrOffset));
1494 ldl(rd, MemOperand(at, kMipsLdlOffset));
1495 }
1496 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001497}
1498
1499
1500// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
1501// bits,
1502// second word in high bits.
1503void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1504 Register scratch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001505 lwu(rd, rs);
1506 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1507 dsll32(scratch, scratch, 0);
1508 Daddu(rd, rd, scratch);
1509}
1510
Ben Murdochc5610432016-08-08 18:44:38 +01001511void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
1512 DCHECK(!rd.is(at));
1513 DCHECK(!rs.rm().is(at));
1514 if (kArchVariant == kMips64r6) {
1515 sd(rd, rs);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001516 } else {
Ben Murdochc5610432016-08-08 18:44:38 +01001517 DCHECK(kArchVariant == kMips64r2);
1518 if (is_int16(rs.offset() + kMipsSdrOffset) &&
1519 is_int16(rs.offset() + kMipsSdlOffset)) {
1520 sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
1521 sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
1522 } else {
1523 LoadRegPlusOffsetToAt(rs);
1524 sdr(rd, MemOperand(at, kMipsSdrOffset));
1525 sdl(rd, MemOperand(at, kMipsSdlOffset));
1526 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001527 }
1528}
1529
1530
1531// Do 64-bit store as two consequent 32-bit stores to unaligned address.
1532void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1533 Register scratch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001534 sw(rd, rs);
1535 dsrl32(scratch, rd, 0);
1536 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1537}
1538
Ben Murdochc5610432016-08-08 18:44:38 +01001539void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1540 Register scratch) {
1541 if (kArchVariant == kMips64r6) {
1542 lwc1(fd, rs);
1543 } else {
1544 DCHECK(kArchVariant == kMips64r2);
1545 Ulw(scratch, rs);
1546 mtc1(scratch, fd);
1547 }
1548}
1549
1550void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1551 Register scratch) {
1552 if (kArchVariant == kMips64r6) {
1553 swc1(fd, rs);
1554 } else {
1555 DCHECK(kArchVariant == kMips64r2);
1556 mfc1(scratch, fd);
1557 Usw(scratch, rs);
1558 }
1559}
1560
1561void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1562 Register scratch) {
1563 DCHECK(!scratch.is(at));
1564 if (kArchVariant == kMips64r6) {
1565 ldc1(fd, rs);
1566 } else {
1567 DCHECK(kArchVariant == kMips64r2);
1568 Uld(scratch, rs);
1569 dmtc1(scratch, fd);
1570 }
1571}
1572
1573void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1574 Register scratch) {
1575 DCHECK(!scratch.is(at));
1576 if (kArchVariant == kMips64r6) {
1577 sdc1(fd, rs);
1578 } else {
1579 DCHECK(kArchVariant == kMips64r2);
1580 dmfc1(scratch, fd);
1581 Usd(scratch, rs);
1582 }
1583}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001584
1585void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1586 AllowDeferredHandleDereference smi_check;
1587 if (value->IsSmi()) {
1588 li(dst, Operand(value), mode);
1589 } else {
1590 DCHECK(value->IsHeapObject());
1591 if (isolate()->heap()->InNewSpace(*value)) {
1592 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1593 li(dst, Operand(cell));
1594 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1595 } else {
1596 li(dst, Operand(value));
1597 }
1598 }
1599}
1600
Ben Murdoch097c5b22016-05-18 11:27:45 +01001601static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
1602 if ((imm >> (bitnum - 1)) & 0x1) {
1603 imm = (imm >> bitnum) + 1;
1604 } else {
1605 imm = imm >> bitnum;
1606 }
1607 return imm;
1608}
1609
1610bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1611 bool higher_bits_sign_extended = false;
1612 if (is_int16(j.imm64_)) {
1613 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1614 } else if (!(j.imm64_ & kHiMask)) {
1615 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1616 } else if (!(j.imm64_ & kImm16Mask)) {
1617 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1618 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1619 higher_bits_sign_extended = true;
1620 }
1621 } else {
1622 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1623 ori(rd, rd, (j.imm64_ & kImm16Mask));
1624 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1625 higher_bits_sign_extended = true;
1626 }
1627 }
1628 return higher_bits_sign_extended;
1629}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001630
1631void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1632 DCHECK(!j.is_reg());
1633 BlockTrampolinePoolScope block_trampoline_pool(this);
1634 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1635 // Normal load of an immediate value which does not need Relocation Info.
1636 if (is_int32(j.imm64_)) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001637 LiLower32BitHelper(rd, j);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001638 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001639 if (kArchVariant == kMips64r6) {
1640 int64_t imm = j.imm64_;
1641 bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
1642 imm = ShiftAndFixSignExtension(imm, 32);
1643 // If LUI writes 1s to higher bits, we need both DAHI/DATI.
1644 if ((imm & kImm16Mask) ||
1645 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1646 dahi(rd, imm & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001647 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001648 imm = ShiftAndFixSignExtension(imm, 16);
1649 if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
1650 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1651 dati(rd, imm & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001652 }
1653 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001654 if (is_int48(j.imm64_)) {
1655 if ((j.imm64_ >> 32) & kImm16Mask) {
1656 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1657 if ((j.imm64_ >> 16) & kImm16Mask) {
1658 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1659 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001660 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001661 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1662 }
1663 dsll(rd, rd, 16);
1664 if (j.imm64_ & kImm16Mask) {
1665 ori(rd, rd, j.imm64_ & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001666 }
1667 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001668 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1669 if ((j.imm64_ >> 32) & kImm16Mask) {
1670 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1671 }
1672 if ((j.imm64_ >> 16) & kImm16Mask) {
1673 dsll(rd, rd, 16);
1674 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1675 if (j.imm64_ & kImm16Mask) {
1676 dsll(rd, rd, 16);
1677 ori(rd, rd, j.imm64_ & kImm16Mask);
1678 } else {
1679 dsll(rd, rd, 16);
1680 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001681 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001682 if (j.imm64_ & kImm16Mask) {
1683 dsll32(rd, rd, 0);
1684 ori(rd, rd, j.imm64_ & kImm16Mask);
1685 } else {
1686 dsll32(rd, rd, 0);
1687 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001688 }
1689 }
1690 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001691 }
1692 } else if (MustUseReg(j.rmode_)) {
1693 RecordRelocInfo(j.rmode_, j.imm64_);
1694 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1695 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1696 dsll(rd, rd, 16);
1697 ori(rd, rd, j.imm64_ & kImm16Mask);
1698 } else if (mode == ADDRESS_LOAD) {
1699 // We always need the same number of instructions as we may need to patch
1700 // this code to load another value which may need all 4 instructions.
1701 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1702 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1703 dsll(rd, rd, 16);
1704 ori(rd, rd, j.imm64_ & kImm16Mask);
1705 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001706 if (kArchVariant == kMips64r6) {
1707 int64_t imm = j.imm64_;
1708 lui(rd, (imm >> kLuiShift) & kImm16Mask);
1709 if (imm & kImm16Mask) {
1710 ori(rd, rd, (imm & kImm16Mask));
1711 }
1712 if ((imm >> 31) & 0x1) {
1713 imm = (imm >> 32) + 1;
1714 } else {
1715 imm = imm >> 32;
1716 }
1717 dahi(rd, imm & kImm16Mask);
1718 if ((imm >> 15) & 0x1) {
1719 imm = (imm >> 16) + 1;
1720 } else {
1721 imm = imm >> 16;
1722 }
1723 dati(rd, imm & kImm16Mask);
1724 } else {
1725 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1726 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1727 dsll(rd, rd, 16);
1728 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1729 dsll(rd, rd, 16);
1730 ori(rd, rd, j.imm64_ & kImm16Mask);
1731 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001732 }
1733}
1734
1735
1736void MacroAssembler::MultiPush(RegList regs) {
1737 int16_t num_to_push = NumberOfBitsSet(regs);
1738 int16_t stack_offset = num_to_push * kPointerSize;
1739
1740 Dsubu(sp, sp, Operand(stack_offset));
1741 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1742 if ((regs & (1 << i)) != 0) {
1743 stack_offset -= kPointerSize;
1744 sd(ToRegister(i), MemOperand(sp, stack_offset));
1745 }
1746 }
1747}
1748
1749
1750void MacroAssembler::MultiPushReversed(RegList regs) {
1751 int16_t num_to_push = NumberOfBitsSet(regs);
1752 int16_t stack_offset = num_to_push * kPointerSize;
1753
1754 Dsubu(sp, sp, Operand(stack_offset));
1755 for (int16_t i = 0; i < kNumRegisters; i++) {
1756 if ((regs & (1 << i)) != 0) {
1757 stack_offset -= kPointerSize;
1758 sd(ToRegister(i), MemOperand(sp, stack_offset));
1759 }
1760 }
1761}
1762
1763
1764void MacroAssembler::MultiPop(RegList regs) {
1765 int16_t stack_offset = 0;
1766
1767 for (int16_t i = 0; i < kNumRegisters; i++) {
1768 if ((regs & (1 << i)) != 0) {
1769 ld(ToRegister(i), MemOperand(sp, stack_offset));
1770 stack_offset += kPointerSize;
1771 }
1772 }
1773 daddiu(sp, sp, stack_offset);
1774}
1775
1776
1777void MacroAssembler::MultiPopReversed(RegList regs) {
1778 int16_t stack_offset = 0;
1779
1780 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1781 if ((regs & (1 << i)) != 0) {
1782 ld(ToRegister(i), MemOperand(sp, stack_offset));
1783 stack_offset += kPointerSize;
1784 }
1785 }
1786 daddiu(sp, sp, stack_offset);
1787}
1788
1789
1790void MacroAssembler::MultiPushFPU(RegList regs) {
1791 int16_t num_to_push = NumberOfBitsSet(regs);
1792 int16_t stack_offset = num_to_push * kDoubleSize;
1793
1794 Dsubu(sp, sp, Operand(stack_offset));
1795 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1796 if ((regs & (1 << i)) != 0) {
1797 stack_offset -= kDoubleSize;
1798 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1799 }
1800 }
1801}
1802
1803
1804void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1805 int16_t num_to_push = NumberOfBitsSet(regs);
1806 int16_t stack_offset = num_to_push * kDoubleSize;
1807
1808 Dsubu(sp, sp, Operand(stack_offset));
1809 for (int16_t i = 0; i < kNumRegisters; i++) {
1810 if ((regs & (1 << i)) != 0) {
1811 stack_offset -= kDoubleSize;
1812 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1813 }
1814 }
1815}
1816
1817
1818void MacroAssembler::MultiPopFPU(RegList regs) {
1819 int16_t stack_offset = 0;
1820
1821 for (int16_t i = 0; i < kNumRegisters; i++) {
1822 if ((regs & (1 << i)) != 0) {
1823 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1824 stack_offset += kDoubleSize;
1825 }
1826 }
1827 daddiu(sp, sp, stack_offset);
1828}
1829
1830
1831void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1832 int16_t stack_offset = 0;
1833
1834 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1835 if ((regs & (1 << i)) != 0) {
1836 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1837 stack_offset += kDoubleSize;
1838 }
1839 }
1840 daddiu(sp, sp, stack_offset);
1841}
1842
1843
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001844void MacroAssembler::Ext(Register rt,
1845 Register rs,
1846 uint16_t pos,
1847 uint16_t size) {
1848 DCHECK(pos < 32);
1849 DCHECK(pos + size < 33);
1850 ext_(rt, rs, pos, size);
1851}
1852
1853
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001854void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1855 uint16_t size) {
1856 DCHECK(pos < 32);
1857 DCHECK(pos + size < 33);
1858 dext_(rt, rs, pos, size);
1859}
1860
1861
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001862void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
1863 uint16_t size) {
1864 DCHECK(pos < 32);
1865 DCHECK(size <= 64);
1866 dextm(rt, rs, pos, size);
1867}
1868
1869
1870void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
1871 uint16_t size) {
1872 DCHECK(pos >= 32 && pos < 64);
1873 DCHECK(size < 33);
1874 dextu(rt, rs, pos, size);
1875}
1876
1877
1878void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
1879 uint16_t size) {
1880 DCHECK(pos < 32);
1881 DCHECK(pos + size <= 32);
1882 DCHECK(size != 0);
1883 dins_(rt, rs, pos, size);
1884}
1885
1886
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001887void MacroAssembler::Ins(Register rt,
1888 Register rs,
1889 uint16_t pos,
1890 uint16_t size) {
1891 DCHECK(pos < 32);
1892 DCHECK(pos + size <= 32);
1893 DCHECK(size != 0);
1894 ins_(rt, rs, pos, size);
1895}
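// Note: for reference, Ext/Dext extract a field of `size` bits starting at
// bit `pos` of rs into the low bits of rt (zero-extended), while Ins/Dins
// insert the low `size` bits of rs into rt at bit `pos` and leave the other
// bits of rt untouched. For example, Ext(t0, t1, 4, 8) with t1 == 0x0000ABCD
// leaves t0 == 0xBC (bits 11..4 of t1).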
1896
1897
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001898void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001899 // Move the data from fs to t8.
1900 mfc1(t8, fs);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001901 Cvt_d_uw(fd, t8);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001902}
1903
1904
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001905void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
1906 // Convert rs to a FP value in fd.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001907 DCHECK(!rs.is(t9));
1908 DCHECK(!rs.is(at));
1909
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001910 // Zero extend int32 in rs.
1911 Dext(t9, rs, 0, 32);
1912 dmtc1(t9, fd);
1913 cvt_d_l(fd, fd);
1914}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001916
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001917void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
1918 // Move the data from fs to t8.
1919 dmfc1(t8, fs);
1920 Cvt_d_ul(fd, t8);
1921}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001922
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001923
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001924void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
1925 // Convert rs to a FP value in fd.
1926
1927 DCHECK(!rs.is(t9));
1928 DCHECK(!rs.is(at));
1929
1930 Label msb_clear, conversion_done;
1931
1932 Branch(&msb_clear, ge, rs, Operand(zero_reg));
1933
1934 // Rs >= 2^63
1935 andi(t9, rs, 1);
1936 dsrl(rs, rs, 1);
1937 or_(t9, t9, rs);
1938 dmtc1(t9, fd);
1939 cvt_d_l(fd, fd);
1940 Branch(USE_DELAY_SLOT, &conversion_done);
1941 add_d(fd, fd, fd); // In delay slot.
1942
1943 bind(&msb_clear);
1944 // Rs < 2^63, so we can do a simple conversion.
1945 dmtc1(rs, fd);
1946 cvt_d_l(fd, fd);
1947
1948 bind(&conversion_done);
1949}
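// Note: cvt_d_l interprets its operand as a signed 64-bit integer, so the
// path above for inputs with the sign bit set first halves the value while
// folding the dropped low bit back in (t9 = (rs >> 1) | (rs & 1), so the
// halved value still rounds the same way), converts that, and then doubles
// the result with add_d(fd, fd, fd); inputs below 2^63 take the plain
// cvt_d_l path.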
1950
Ben Murdoch097c5b22016-05-18 11:27:45 +01001951void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
1952 // Move the data from fs to t8.
1953 mfc1(t8, fs);
1954 Cvt_s_uw(fd, t8);
1955}
1956
1957void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
1958 // Convert rs to a FP value in fd.
1959 DCHECK(!rs.is(t9));
1960 DCHECK(!rs.is(at));
1961
1962 // Zero extend int32 in rs.
1963 Dext(t9, rs, 0, 32);
1964 dmtc1(t9, fd);
1965 cvt_s_l(fd, fd);
1966}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001967
1968void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
1969 // Move the data from fs to t8.
1970 dmfc1(t8, fs);
1971 Cvt_s_ul(fd, t8);
1972}
1973
1974
1975void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
1976 // Convert rs to a FP value in fd.
1977
1978 DCHECK(!rs.is(t9));
1979 DCHECK(!rs.is(at));
1980
1981 Label positive, conversion_done;
1982
1983 Branch(&positive, ge, rs, Operand(zero_reg));
1984
1985 // Rs >= 2^63.
1986 andi(t9, rs, 1);
1987 dsrl(rs, rs, 1);
1988 or_(t9, t9, rs);
1989 dmtc1(t9, fd);
1990 cvt_s_l(fd, fd);
1991 Branch(USE_DELAY_SLOT, &conversion_done);
1992 add_s(fd, fd, fd); // In delay slot.
1993
1994 bind(&positive);
1995 // Rs < 2^63, so we can do a simple conversion.
1996 dmtc1(rs, fd);
1997 cvt_s_l(fd, fd);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001998
1999 bind(&conversion_done);
2000}
2001
2002
2003void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
2004 round_l_d(fd, fs);
2005}
2006
2007
2008void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
2009 floor_l_d(fd, fs);
2010}
2011
2012
2013void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
2014 ceil_l_d(fd, fs);
2015}
2016
2017
2018void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
2019 trunc_l_d(fd, fs);
2020}
2021
2022
2023void MacroAssembler::Trunc_l_ud(FPURegister fd,
2024 FPURegister fs,
2025 FPURegister scratch) {
2026 // Load to GPR.
2027 dmfc1(t8, fs);
2028 // Reset sign bit.
2029 li(at, 0x7fffffffffffffff);
2030 and_(t8, t8, at);
2031 dmtc1(t8, fs);
2032 trunc_l_d(fd, fs);
2033}
2034
2035
2036void MacroAssembler::Trunc_uw_d(FPURegister fd,
2037 FPURegister fs,
2038 FPURegister scratch) {
2039 Trunc_uw_d(fs, t8, scratch);
2040 mtc1(t8, fd);
2041}
2042
Ben Murdoch097c5b22016-05-18 11:27:45 +01002043void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
2044 FPURegister scratch) {
2045 Trunc_uw_s(fs, t8, scratch);
2046 mtc1(t8, fd);
2047}
2048
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002049void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
2050 FPURegister scratch, Register result) {
2051 Trunc_ul_d(fs, t8, scratch, result);
2052 dmtc1(t8, fd);
2053}
2054
2055
2056void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
2057 FPURegister scratch, Register result) {
2058 Trunc_ul_s(fs, t8, scratch, result);
2059 dmtc1(t8, fd);
2060}
2061
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002062
2063void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
2064 trunc_w_d(fd, fs);
2065}
2066
2067
2068void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
2069 round_w_d(fd, fs);
2070}
2071
2072
2073void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
2074 floor_w_d(fd, fs);
2075}
2076
2077
2078void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
2079 ceil_w_d(fd, fs);
2080}
2081
2082
2083void MacroAssembler::Trunc_uw_d(FPURegister fd,
2084 Register rs,
2085 FPURegister scratch) {
2086 DCHECK(!fd.is(scratch));
2087 DCHECK(!rs.is(at));
2088
2089 // Load 2^31 into scratch as its double representation.
2090 li(at, 0x41E00000);
2091 mtc1(zero_reg, scratch);
2092 mthc1(at, scratch);
2093 // Test if scratch > fd.
2094 // If fd < 2^31 we can convert it normally.
2095 Label simple_convert;
2096 BranchF(&simple_convert, NULL, lt, fd, scratch);
2097
2098 // First we subtract 2^31 from fd, then trunc it to rs
2099 // and add 2^31 to rs.
2100 sub_d(scratch, fd, scratch);
2101 trunc_w_d(scratch, scratch);
2102 mfc1(rs, scratch);
2103 Or(rs, rs, 1 << 31);
2104
2105 Label done;
2106 Branch(&done);
2107 // Simple conversion.
2108 bind(&simple_convert);
2109 trunc_w_d(scratch, fd);
2110 mfc1(rs, scratch);
2111
2112 bind(&done);
2113}
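// Note: trunc_w_d only produces signed 32-bit results, so for inputs >= 2^31
// the code above subtracts 2^31.0, truncates, and ORs bit 31 back in. A rough
// worked example: fd == 3000000000.0 becomes 852516352.0 after the subtract,
// which truncates to 0x32D05E00, and OR-ing in 1 << 31 gives 0xB2D05E00,
// i.e. 3000000000 as an unsigned 32-bit value.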
2114
Ben Murdoch097c5b22016-05-18 11:27:45 +01002115void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
2116 FPURegister scratch) {
2117 DCHECK(!fd.is(scratch));
2118 DCHECK(!rs.is(at));
2119
2120 // Load 2^31 into scratch as its float representation.
2121 li(at, 0x4F000000);
2122 mtc1(at, scratch);
2123 // Test if scratch > fd.
2124 // If fd < 2^31 we can convert it normally.
2125 Label simple_convert;
2126 BranchF32(&simple_convert, NULL, lt, fd, scratch);
2127
2128 // First we subtract 2^31 from fd, then trunc it to rs
2129 // and add 2^31 to rs.
2130 sub_s(scratch, fd, scratch);
2131 trunc_w_s(scratch, scratch);
2132 mfc1(rs, scratch);
2133 Or(rs, rs, 1 << 31);
2134
2135 Label done;
2136 Branch(&done);
2137 // Simple conversion.
2138 bind(&simple_convert);
2139 trunc_w_s(scratch, fd);
2140 mfc1(rs, scratch);
2141
2142 bind(&done);
2143}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002144
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002145void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
2146 FPURegister scratch, Register result) {
2147 DCHECK(!fd.is(scratch));
2148 DCHECK(!AreAliased(rs, result, at));
2149
2150 Label simple_convert, done, fail;
2151 if (result.is_valid()) {
2152 mov(result, zero_reg);
2153 Move(scratch, -1.0);
2154 // If fd =< -1 or unordered, then the conversion fails.
2155 // If fd <= -1 or unordered, then the conversion fails.
2156 }
2157
2158 // Load 2^63 into scratch as its double representation.
2159 li(at, 0x43e0000000000000);
2160 dmtc1(at, scratch);
2161
2162 // Test if scratch > fd.
2163 // If fd < 2^63 we can convert it normally.
2164 BranchF(&simple_convert, nullptr, lt, fd, scratch);
2165
2166 // First we subtract 2^63 from fd, then trunc it to rs
2167 // and add 2^63 to rs.
2168 sub_d(scratch, fd, scratch);
2169 trunc_l_d(scratch, scratch);
2170 dmfc1(rs, scratch);
2171 Or(rs, rs, Operand(1UL << 63));
2172 Branch(&done);
2173
2174 // Simple conversion.
2175 bind(&simple_convert);
2176 trunc_l_d(scratch, fd);
2177 dmfc1(rs, scratch);
2178
2179 bind(&done);
2180 if (result.is_valid()) {
2181 // The conversion failed if the result is negative.
2182 addiu(at, zero_reg, -1);
2183 dsrl(at, at, 1); // Load 2^63 - 1.
2184 dmfc1(result, scratch);
2185 xor_(result, result, at);
2186 Slt(result, zero_reg, result);
2187 }
2188
2189 bind(&fail);
2190}
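// Note: when a `result` register is supplied to Trunc_ul_d, it acts as a
// success flag. It is cleared up front, stays clear when the input is <= -1
// or NaN, and is set to 1 only if the truncated value is non-negative and
// differs from 0x7FFFFFFFFFFFFFFF, the value trunc_l_d produces on overflow;
// that is what the xor with 2^63 - 1 followed by
// Slt(result, zero_reg, result) computes.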
2191
2192
2193void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
2194 FPURegister scratch, Register result) {
2195 DCHECK(!fd.is(scratch));
2196 DCHECK(!AreAliased(rs, result, at));
2197
2198 Label simple_convert, done, fail;
2199 if (result.is_valid()) {
2200 mov(result, zero_reg);
2201 Move(scratch, -1.0f);
2202 // If fd =< -1 or unordered, then the conversion fails.
2203 // If fd <= -1 or unordered, then the conversion fails.
2204 }
2205
2206 // Load 2^63 into scratch as its float representation.
2207 li(at, 0x5f000000);
2208 mtc1(at, scratch);
2209
2210 // Test if scratch > fd.
2211 // If fd < 2^63 we can convert it normally.
2212 BranchF32(&simple_convert, nullptr, lt, fd, scratch);
2213
2214 // First we subtract 2^63 from fd, then trunc it to rs
2215 // and add 2^63 to rs.
2216 sub_s(scratch, fd, scratch);
2217 trunc_l_s(scratch, scratch);
2218 dmfc1(rs, scratch);
2219 Or(rs, rs, Operand(1UL << 63));
2220 Branch(&done);
2221
2222 // Simple conversion.
2223 bind(&simple_convert);
2224 trunc_l_s(scratch, fd);
2225 dmfc1(rs, scratch);
2226
2227 bind(&done);
2228 if (result.is_valid()) {
2229 // The conversion failed if the result is negative or unordered.
2230 addiu(at, zero_reg, -1);
2231 dsrl(at, at, 1); // Load 2^63 - 1.
2232 dmfc1(result, scratch);
2233 xor_(result, result, at);
2234 Slt(result, zero_reg, result);
2235 }
2236
2237 bind(&fail);
2238}
2239
2240
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002241void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2242 FPURegister ft, FPURegister scratch) {
2243 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
2244 madd_d(fd, fr, fs, ft);
2245 } else {
2246 // Cannot change the source registers' values.
2247 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2248 mul_d(scratch, fs, ft);
2249 add_d(fd, fr, scratch);
2250 }
2251}
2252
2253
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002254void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2255 Label* nan, Condition cond, FPURegister cmp1,
2256 FPURegister cmp2, BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002258 if (cond == al) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002259 Branch(bd, target);
2260 return;
2261 }
2262
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002263 if (kArchVariant == kMips64r6) {
2264 sizeField = sizeField == D ? L : W;
2265 }
2266
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002267 DCHECK(nan || target);
2268 // Check for unordered (NaN) cases.
2269 if (nan) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002270 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002271 if (kArchVariant != kMips64r6) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002272 if (long_branch) {
2273 Label skip;
2274 c(UN, sizeField, cmp1, cmp2);
2275 bc1f(&skip);
2276 nop();
2277 BranchLong(nan, bd);
2278 bind(&skip);
2279 } else {
2280 c(UN, sizeField, cmp1, cmp2);
2281 bc1t(nan);
2282 if (bd == PROTECT) {
2283 nop();
2284 }
2285 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002286 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002287 // Use kDoubleCompareReg for the comparison result. It has to be
2288 // unavailable to the Lithium register allocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002290 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2291 if (long_branch) {
2292 Label skip;
2293 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2294 bc1eqz(&skip, kDoubleCompareReg);
2295 nop();
2296 BranchLong(nan, bd);
2297 bind(&skip);
2298 } else {
2299 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2300 bc1nez(nan, kDoubleCompareReg);
2301 if (bd == PROTECT) {
2302 nop();
2303 }
2304 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002305 }
2306 }
2307
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002308 if (target) {
2309 bool long_branch =
2310 target->is_bound() ? is_near(target) : is_trampoline_emitted();
2311 if (long_branch) {
2312 Label skip;
2313 Condition neg_cond = NegateFpuCondition(cond);
2314 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2315 BranchLong(target, bd);
2316 bind(&skip);
2317 } else {
2318 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2319 }
2320 }
2321}
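// Note: BranchFCommon dispatches the unordered (NaN) case first, so the
// ordered comparison that follows never sees a NaN. Pre-r6 this uses
// c(UN, ...) with bc1t/bc1f on the implicit FCSR condition bit; r6 uses
// cmp(UN, ...) into kDoubleCompareReg with bc1nez/bc1eqz. When the label is
// out of short-branch range, the condition is inverted around a BranchLong
// via a local skip label.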
2322
2323
2324void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2325 Condition cc, FPURegister cmp1,
2326 FPURegister cmp2, BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002327 if (kArchVariant != kMips64r6) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002328 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002329 if (target) {
2330 // Here NaN cases were either handled by this function or are assumed to
2331 // have been handled by the caller.
2332 switch (cc) {
2333 case lt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002334 c(OLT, sizeField, cmp1, cmp2);
2335 bc1t(target);
2336 break;
2337 case ult:
2338 c(ULT, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002339 bc1t(target);
2340 break;
2341 case gt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002342 c(ULE, sizeField, cmp1, cmp2);
2343 bc1f(target);
2344 break;
2345 case ugt:
2346 c(OLE, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002347 bc1f(target);
2348 break;
2349 case ge:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002350 c(ULT, sizeField, cmp1, cmp2);
2351 bc1f(target);
2352 break;
2353 case uge:
2354 c(OLT, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002355 bc1f(target);
2356 break;
2357 case le:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002358 c(OLE, sizeField, cmp1, cmp2);
2359 bc1t(target);
2360 break;
2361 case ule:
2362 c(ULE, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002363 bc1t(target);
2364 break;
2365 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002366 c(EQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002367 bc1t(target);
2368 break;
2369 case ueq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002370 c(UEQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002371 bc1t(target);
2372 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002373 case ne: // Unordered or not equal.
2374 c(EQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002375 bc1f(target);
2376 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002377 case ogl:
2378 c(UEQ, sizeField, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002379 bc1f(target);
2380 break;
2381 default:
2382 CHECK(0);
2383 }
2384 }
2385 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002386 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002387 if (target) {
2388 // Here NaN cases were either handled by this function or are assumed to
2389 // have been handled by the caller.
2390 // Unsigned conditions are treated as their signed counterpart.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002391 // Use kDoubleCompareReg for the comparison result; it is valid in fp64
2392 // (FR = 1) mode.
2393 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002394 switch (cc) {
2395 case lt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002396 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2397 bc1nez(target, kDoubleCompareReg);
2398 break;
2399 case ult:
2400 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2401 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002402 break;
2403 case gt:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002404 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2405 bc1eqz(target, kDoubleCompareReg);
2406 break;
2407 case ugt:
2408 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2409 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002410 break;
2411 case ge:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002412 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2413 bc1eqz(target, kDoubleCompareReg);
2414 break;
2415 case uge:
2416 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2417 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002418 break;
2419 case le:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002420 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2421 bc1nez(target, kDoubleCompareReg);
2422 break;
2423 case ule:
2424 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2425 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002426 break;
2427 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002428 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2429 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002430 break;
2431 case ueq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002432 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2433 bc1nez(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002434 break;
2435 case ne:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002436 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2437 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002438 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002439 case ogl:
2440 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2441 bc1eqz(target, kDoubleCompareReg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002442 break;
2443 default:
2444 CHECK(0);
2445 }
2446 }
2447 }
2448
2449 if (bd == PROTECT) {
2450 nop();
2451 }
2452}
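// Note: the tables above map integer-style conditions onto the FPU compare
// primitives partly by negation; for example `gt` is emitted as a ULE compare
// followed by a branch-on-false, so a NaN operand (which makes ULE true)
// falls through instead of taking the branch. This matches the convention
// that NaN cases are dispatched earlier by BranchFCommon.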
2453
2454
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002455void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2456 DCHECK(!src_low.is(at));
2457 mfhc1(at, dst);
2458 mtc1(src_low, dst);
2459 mthc1(at, dst);
2460}
2461
2462
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002463void MacroAssembler::Move(FPURegister dst, float imm) {
2464 li(at, Operand(bit_cast<int32_t>(imm)));
2465 mtc1(at, dst);
2466}
2467
2468
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002469void MacroAssembler::Move(FPURegister dst, double imm) {
2470 static const DoubleRepresentation minus_zero(-0.0);
2471 static const DoubleRepresentation zero(0.0);
2472 DoubleRepresentation value_rep(imm);
2473 // Handle special values first.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002474 if (value_rep == zero && has_double_zero_reg_set_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002475 mov_d(dst, kDoubleRegZero);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002476 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002477 neg_d(dst, kDoubleRegZero);
2478 } else {
2479 uint32_t lo, hi;
2480 DoubleAsTwoUInt32(imm, &lo, &hi);
2481 // Move the low part of the double into the lower bits of the corresponding
2482 // FPU register.
2483 if (lo != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002484 if (!(lo & kImm16Mask)) {
2485 lui(at, (lo >> kLuiShift) & kImm16Mask);
2486 mtc1(at, dst);
2487 } else if (!(lo & kHiMask)) {
2488 ori(at, zero_reg, lo & kImm16Mask);
2489 mtc1(at, dst);
2490 } else {
2491 lui(at, (lo >> kLuiShift) & kImm16Mask);
2492 ori(at, at, lo & kImm16Mask);
2493 mtc1(at, dst);
2494 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002495 } else {
2496 mtc1(zero_reg, dst);
2497 }
2498 // Move the high part of the double into the high bits of the corresponding
2499 // FPU register.
2500 if (hi != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002501 if (!(hi & kImm16Mask)) {
2502 lui(at, (hi >> kLuiShift) & kImm16Mask);
2503 mthc1(at, dst);
2504 } else if (!(hi & kHiMask)) {
2505 ori(at, zero_reg, hi & kImm16Mask);
2506 mthc1(at, dst);
2507 } else {
2508 lui(at, (hi >> kLuiShift) & kImm16Mask);
2509 ori(at, at, hi & kImm16Mask);
2510 mthc1(at, dst);
2511 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002512 } else {
2513 mthc1(zero_reg, dst);
2514 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002515 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002516 }
2517}
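// Note: the double is split into its low and high 32-bit words and each word
// is materialized into `at` with the shortest lui/ori sequence before being
// moved in with mtc1/mthc1. For example, 1.0 has the bit pattern
// 0x3FF0000000000000, so the low word is just mtc1(zero_reg, dst) and the
// high word needs only lui(at, 0x3FF0) followed by mthc1(at, dst).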
2518
2519
2520void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2521 if (kArchVariant == kMips64r6) {
2522 Label done;
2523 Branch(&done, ne, rt, Operand(zero_reg));
2524 mov(rd, rs);
2525 bind(&done);
2526 } else {
2527 movz(rd, rs, rt);
2528 }
2529}
2530
2531
2532void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2533 if (kArchVariant == kMips64r6) {
2534 Label done;
2535 Branch(&done, eq, rt, Operand(zero_reg));
2536 mov(rd, rs);
2537 bind(&done);
2538 } else {
2539 movn(rd, rs, rt);
2540 }
2541}
2542
2543
2544void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2545 movt(rd, rs, cc);
2546}
2547
2548
2549void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2550 movf(rd, rs, cc);
2551}
2552
Ben Murdochda12d292016-06-02 14:46:10 +01002553#define __ masm->
2554
2555static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2556 FPURegister src1, FPURegister src2, Label* equal) {
2557 if (src1.is(src2)) {
2558 __ Move(dst, src1);
2559 return true;
2560 }
2561
2562 Label other, compare_not_equal;
2563 FPURegister left, right;
2564 if (kind == MaxMinKind::kMin) {
2565 left = src1;
2566 right = src2;
2567 } else {
2568 left = src2;
2569 right = src1;
2570 }
2571
2572 __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
2573 // Left- and right-hand sides are equal; check for -0 vs. +0.
2574 __ dmfc1(t8, src1);
2575 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2576 __ Move_d(dst, right);
2577 __ Branch(equal);
2578 __ bind(&other);
2579 __ Move_d(dst, left);
2580 __ Branch(equal);
2581 __ bind(&compare_not_equal);
2582 return false;
2583}
2584
2585static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2586 FPURegister src1, FPURegister src2, Label* equal) {
2587 if (src1.is(src2)) {
2588 __ Move(dst, src1);
2589 return true;
2590 }
2591
2592 Label other, compare_not_equal;
2593 FPURegister left, right;
2594 if (kind == MaxMinKind::kMin) {
2595 left = src1;
2596 right = src2;
2597 } else {
2598 left = src2;
2599 right = src1;
2600 }
2601
2602 __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
2603 // Left- and right-hand sides are equal; check for -0 vs. +0.
2604 __ FmoveLow(t8, src1);
2605 __ dsll32(t8, t8, 0);
2606 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2607 __ Move_s(dst, right);
2608 __ Branch(equal);
2609 __ bind(&other);
2610 __ Move_s(dst, left);
2611 __ Branch(equal);
2612 __ bind(&compare_not_equal);
2613 return false;
2614}
2615
2616#undef __
2617
2618void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
2619 FPURegister src2, Label* nan) {
2620 if (nan) {
2621 BranchF64(nullptr, nan, eq, src1, src2);
2622 }
2623 if (kArchVariant >= kMips64r6) {
2624 min_d(dst, src1, src2);
2625 } else {
2626 Label skip;
2627 if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2628 if (dst.is(src1)) {
2629 BranchF64(&skip, nullptr, le, src1, src2);
2630 Move_d(dst, src2);
2631 } else if (dst.is(src2)) {
2632 BranchF64(&skip, nullptr, ge, src1, src2);
2633 Move_d(dst, src1);
2634 } else {
2635 Label right;
2636 BranchF64(&right, nullptr, gt, src1, src2);
2637 Move_d(dst, src1);
2638 Branch(&skip);
2639 bind(&right);
2640 Move_d(dst, src2);
2641 }
2642 }
2643 bind(&skip);
2644 }
2645}
2646
2647void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
2648 FPURegister src2, Label* nan) {
2649 if (nan) {
2650 BranchF64(nullptr, nan, eq, src1, src2);
2651 }
2652 if (kArchVariant >= kMips64r6) {
2653 max_d(dst, src1, src2);
2654 } else {
2655 Label skip;
2656 if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2657 if (dst.is(src1)) {
2658 BranchF64(&skip, nullptr, ge, src1, src2);
2659 Move_d(dst, src2);
2660 } else if (dst.is(src2)) {
2661 BranchF64(&skip, nullptr, le, src1, src2);
2662 Move_d(dst, src1);
2663 } else {
2664 Label right;
2665 BranchF64(&right, nullptr, lt, src1, src2);
2666 Move_d(dst, src1);
2667 Branch(&skip);
2668 bind(&right);
2669 Move_d(dst, src2);
2670 }
2671 }
2672 bind(&skip);
2673 }
2674}
2675
2676void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
2677 FPURegister src2, Label* nan) {
2678 if (nan) {
2679 BranchF32(nullptr, nan, eq, src1, src2);
2680 }
2681 if (kArchVariant >= kMips64r6) {
2682 min_s(dst, src1, src2);
2683 } else {
2684 Label skip;
2685 if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2686 if (dst.is(src1)) {
2687 BranchF32(&skip, nullptr, le, src1, src2);
2688 Move_s(dst, src2);
2689 } else if (dst.is(src2)) {
2690 BranchF32(&skip, nullptr, ge, src1, src2);
2691 Move_s(dst, src1);
2692 } else {
2693 Label right;
2694 BranchF32(&right, nullptr, gt, src1, src2);
2695 Move_s(dst, src1);
2696 Branch(&skip);
2697 bind(&right);
2698 Move_s(dst, src2);
2699 }
2700 }
2701 bind(&skip);
2702 }
2703}
2704
2705void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
2706 FPURegister src2, Label* nan) {
2707 if (nan) {
2708 BranchF32(nullptr, nan, eq, src1, src2);
2709 }
2710 if (kArchVariant >= kMips64r6) {
2711 max_s(dst, src1, src2);
2712 } else {
2713 Label skip;
2714 if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2715 if (dst.is(src1)) {
2716 BranchF32(&skip, nullptr, ge, src1, src2);
2717 Move_s(dst, src2);
2718 } else if (dst.is(src2)) {
2719 BranchF32(&skip, nullptr, le, src1, src2);
2720 Move_s(dst, src1);
2721 } else {
2722 Label right;
2723 BranchF32(&right, nullptr, lt, src1, src2);
2724 Move_s(dst, src1);
2725 Branch(&skip);
2726 bind(&right);
2727 Move_s(dst, src2);
2728 }
2729 }
2730 bind(&skip);
2731 }
2732}
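// Note: on r6 the four helpers above lower directly to min_d/max_d and
// min_s/max_s. On older variants the ZeroHelper_* routines resolve the
// +0/-0 tie (min prefers -0, max prefers +0) and the remaining cases use an
// ordinary compare-and-move sequence. NaN inputs are not resolved here; a
// caller that passes a `nan` label gets a branch to it on any unordered
// operand via the BranchF comparison at the top of each helper.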
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002733
2734void MacroAssembler::Clz(Register rd, Register rs) {
2735 clz(rd, rs);
2736}
2737
2738
2739void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2740 Register result,
2741 DoubleRegister double_input,
2742 Register scratch,
2743 DoubleRegister double_scratch,
2744 Register except_flag,
2745 CheckForInexactConversion check_inexact) {
2746 DCHECK(!result.is(scratch));
2747 DCHECK(!double_input.is(double_scratch));
2748 DCHECK(!except_flag.is(scratch));
2749
2750 Label done;
2751
2752 // Clear the except flag (0 = no exception)
2753 mov(except_flag, zero_reg);
2754
2755 // Test for values that can be exactly represented as a signed 32-bit integer.
2756 cvt_w_d(double_scratch, double_input);
2757 mfc1(result, double_scratch);
2758 cvt_d_w(double_scratch, double_scratch);
2759 BranchF(&done, NULL, eq, double_input, double_scratch);
2760
2761 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
2762
2763 if (check_inexact == kDontCheckForInexactConversion) {
2764 // Ignore inexact exceptions.
2765 except_mask &= ~kFCSRInexactFlagMask;
2766 }
2767
2768 // Save FCSR.
2769 cfc1(scratch, FCSR);
2770 // Disable FPU exceptions.
2771 ctc1(zero_reg, FCSR);
2772
2773 // Do operation based on rounding mode.
2774 switch (rounding_mode) {
2775 case kRoundToNearest:
2776 Round_w_d(double_scratch, double_input);
2777 break;
2778 case kRoundToZero:
2779 Trunc_w_d(double_scratch, double_input);
2780 break;
2781 case kRoundToPlusInf:
2782 Ceil_w_d(double_scratch, double_input);
2783 break;
2784 case kRoundToMinusInf:
2785 Floor_w_d(double_scratch, double_input);
2786 break;
2787 } // End of switch-statement.
2788
2789 // Retrieve FCSR.
2790 cfc1(except_flag, FCSR);
2791 // Restore FCSR.
2792 ctc1(scratch, FCSR);
2793 // Move the converted value into the result register.
2794 mfc1(result, double_scratch);
2795
2796 // Check for fpu exceptions.
2797 And(except_flag, except_flag, Operand(except_mask));
2798
2799 bind(&done);
2800}
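// Note: a rough usage sketch for EmitFPUTruncate. After the call,
// `except_flag` is zero when none of the checked FCSR exception bits were
// raised (inexact is excluded when kDontCheckForInexactConversion is passed)
// and non-zero otherwise, so a typical caller, with registers of its own
// choosing, does something like:
//   EmitFPUTruncate(kRoundToZero, result, input, scratch, double_scratch,
//                   except_flag, kCheckForInexactConversion);
//   Branch(&conversion_failed, ne, except_flag, Operand(zero_reg));
// where `conversion_failed` is a caller-defined label.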
2801
2802
2803void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2804 DoubleRegister double_input,
2805 Label* done) {
2806 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2807 Register scratch = at;
2808 Register scratch2 = t9;
2809
2810 // Clear cumulative exception flags and save the FCSR.
2811 cfc1(scratch2, FCSR);
2812 ctc1(zero_reg, FCSR);
2813 // Try a conversion to a signed integer.
2814 trunc_w_d(single_scratch, double_input);
2815 mfc1(result, single_scratch);
2816 // Retrieve and restore the FCSR.
2817 cfc1(scratch, FCSR);
2818 ctc1(scratch2, FCSR);
2819 // Check for overflow and NaNs.
2820 And(scratch,
2821 scratch,
2822 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2823 // If we had no exceptions we are done.
2824 Branch(done, eq, scratch, Operand(zero_reg));
2825}
2826
2827
2828void MacroAssembler::TruncateDoubleToI(Register result,
2829 DoubleRegister double_input) {
2830 Label done;
2831
2832 TryInlineTruncateDoubleToI(result, double_input, &done);
2833
2834 // If we fell through, the inline version didn't succeed, so call the stub instead.
2835 push(ra);
2836 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2837 sdc1(double_input, MemOperand(sp, 0));
2838
2839 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2840 CallStub(&stub);
2841
2842 Daddu(sp, sp, Operand(kDoubleSize));
2843 pop(ra);
2844
2845 bind(&done);
2846}
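// Note: TruncateDoubleToI first tries the cheap in-line truncation above and
// only falls back to DoubleToIStub (with the input spilled to the stack) when
// the FCSR reports overflow, underflow or an invalid operation; the stub then
// handles the out-of-range and NaN cases in software.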
2847
2848
2849void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2850 Label done;
2851 DoubleRegister double_scratch = f12;
2852 DCHECK(!result.is(object));
2853
2854 ldc1(double_scratch,
2855 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2856 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2857
2858 // If we fell through, the inline version didn't succeed, so call the stub instead.
2859 push(ra);
2860 DoubleToIStub stub(isolate(),
2861 object,
2862 result,
2863 HeapNumber::kValueOffset - kHeapObjectTag,
2864 true,
2865 true);
2866 CallStub(&stub);
2867 pop(ra);
2868
2869 bind(&done);
2870}
2871
2872
2873void MacroAssembler::TruncateNumberToI(Register object,
2874 Register result,
2875 Register heap_number_map,
2876 Register scratch,
2877 Label* not_number) {
2878 Label done;
2879 DCHECK(!result.is(object));
2880
2881 UntagAndJumpIfSmi(result, object, &done);
2882 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2883 TruncateHeapNumberToI(result, object);
2884
2885 bind(&done);
2886}
2887
2888
2889void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2890 Register src,
2891 int num_least_bits) {
2892 // Ext(dst, src, kSmiTagSize, num_least_bits);
2893 SmiUntag(dst, src);
2894 And(dst, dst, Operand((1 << num_least_bits) - 1));
2895}
2896
2897
2898void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2899 Register src,
2900 int num_least_bits) {
2901 DCHECK(!src.is(dst));
2902 And(dst, src, Operand((1 << num_least_bits) - 1));
2903}
2904
2905
2906// Emulated conditional branches do not emit a nop in the branch delay slot.
2907//
2908// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2909#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2910 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2911 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2912
2913
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002914void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2915 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002916 BranchShort(offset, bdslot);
2917}
2918
2919
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002920void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2921 const Operand& rt, BranchDelaySlot bdslot) {
2922 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2923 DCHECK(is_near);
2924 USE(is_near);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002925}
2926
2927
2928void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2929 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002930 if (is_near_branch(L)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002931 BranchShort(L, bdslot);
2932 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002933 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002934 }
2935 } else {
2936 if (is_trampoline_emitted()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002937 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002938 } else {
2939 BranchShort(L, bdslot);
2940 }
2941 }
2942}
2943
2944
2945void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2946 const Operand& rt,
2947 BranchDelaySlot bdslot) {
2948 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002949 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002950 if (cond != cc_always) {
2951 Label skip;
2952 Condition neg_cond = NegateCondition(cond);
2953 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002954 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002955 bind(&skip);
2956 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002957 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002958 }
2959 }
2960 } else {
2961 if (is_trampoline_emitted()) {
2962 if (cond != cc_always) {
2963 Label skip;
2964 Condition neg_cond = NegateCondition(cond);
2965 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002966 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002967 bind(&skip);
2968 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002969 BranchLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002970 }
2971 } else {
2972 BranchShort(L, cond, rs, rt, bdslot);
2973 }
2974 }
2975}
2976
2977
2978void MacroAssembler::Branch(Label* L,
2979 Condition cond,
2980 Register rs,
2981 Heap::RootListIndex index,
2982 BranchDelaySlot bdslot) {
2983 LoadRoot(at, index);
2984 Branch(L, cond, rs, Operand(at), bdslot);
2985}
2986
2987
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002988void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2989 BranchDelaySlot bdslot) {
2990 DCHECK(L == nullptr || offset == 0);
2991 offset = GetOffset(offset, L, OffsetSize::kOffset16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002992 b(offset);
2993
2994 // Emit a nop in the branch delay slot if required.
2995 if (bdslot == PROTECT)
2996 nop();
2997}
2998
2999
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003000void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
3001 DCHECK(L == nullptr || offset == 0);
3002 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3003 bc(offset);
3004}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003005
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003006
3007void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
3008 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3009 DCHECK(is_int26(offset));
3010 BranchShortHelperR6(offset, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003011 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003012 DCHECK(is_int16(offset));
3013 BranchShortHelper(offset, nullptr, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003014 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003015}
3016
3017
3018void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003019 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3020 BranchShortHelperR6(0, L);
3021 } else {
3022 BranchShortHelper(0, L, bdslot);
3023 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003024}
3025
3026
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003027static inline bool IsZero(const Operand& rt) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003028 if (rt.is_reg()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003029 return rt.rm().is(zero_reg);
3030 } else {
3031 return rt.immediate() == 0;
3032 }
3033}
3034
3035
3036int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
3037 if (L) {
3038 offset = branch_offset_helper(L, bits) >> 2;
3039 } else {
3040 DCHECK(is_intn(offset, bits));
3041 }
3042 return offset;
3043}
3044
3045
3046Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
3047 Register scratch) {
3048 Register r2 = no_reg;
3049 if (rt.is_reg()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003050 r2 = rt.rm_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003051 } else {
3052 r2 = scratch;
3053 li(r2, rt);
3054 }
3055
3056 return r2;
3057}
3058
3059
3060bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
3061 Condition cond, Register rs,
3062 const Operand& rt) {
3063 DCHECK(L == nullptr || offset == 0);
3064 Register scratch = rs.is(at) ? t8 : at;
3065 OffsetSize bits = OffsetSize::kOffset16;
3066
3067 // Be careful to always use shifted_branch_offset only just before the
3068 // branch instruction, as the location will be remembered for patching the
3069 // target.
3070 {
3071 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003072 switch (cond) {
3073 case cc_always:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003074 bits = OffsetSize::kOffset26;
3075 if (!is_near(L, bits)) return false;
3076 offset = GetOffset(offset, L, bits);
3077 bc(offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003078 break;
3079 case eq:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003080 if (rs.code() == rt.rm_.reg_code) {
3081 // Pre R6 beq is used here to make the code patchable. Otherwise bc
3082 // should be used, which has no condition field and so is not patchable.
3083 bits = OffsetSize::kOffset16;
3084 if (!is_near(L, bits)) return false;
3085 scratch = GetRtAsRegisterHelper(rt, scratch);
3086 offset = GetOffset(offset, L, bits);
3087 beq(rs, scratch, offset);
3088 nop();
3089 } else if (IsZero(rt)) {
3090 bits = OffsetSize::kOffset21;
3091 if (!is_near(L, bits)) return false;
3092 offset = GetOffset(offset, L, bits);
3093 beqzc(rs, offset);
3094 } else {
3095 // We don't want any other register but scratch clobbered.
3096 bits = OffsetSize::kOffset16;
3097 if (!is_near(L, bits)) return false;
3098 scratch = GetRtAsRegisterHelper(rt, scratch);
3099 offset = GetOffset(offset, L, bits);
3100 beqc(rs, scratch, offset);
3101 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003102 break;
3103 case ne:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003104 if (rs.code() == rt.rm_.reg_code) {
3105 // Pre R6 bne is used here to make the code patchable. Otherwise we
3106 // should not generate any instruction.
3107 bits = OffsetSize::kOffset16;
3108 if (!is_near(L, bits)) return false;
3109 scratch = GetRtAsRegisterHelper(rt, scratch);
3110 offset = GetOffset(offset, L, bits);
3111 bne(rs, scratch, offset);
3112 nop();
3113 } else if (IsZero(rt)) {
3114 bits = OffsetSize::kOffset21;
3115 if (!is_near(L, bits)) return false;
3116 offset = GetOffset(offset, L, bits);
3117 bnezc(rs, offset);
3118 } else {
3119 // We don't want any other register but scratch clobbered.
3120 bits = OffsetSize::kOffset16;
3121 if (!is_near(L, bits)) return false;
3122 scratch = GetRtAsRegisterHelper(rt, scratch);
3123 offset = GetOffset(offset, L, bits);
3124 bnec(rs, scratch, offset);
3125 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003126 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003127
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003128 // Signed comparison.
3129 case greater:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003130 // rs > rt
3131 if (rs.code() == rt.rm_.reg_code) {
3132 break; // No code needs to be emitted.
3133 } else if (rs.is(zero_reg)) {
3134 bits = OffsetSize::kOffset16;
3135 if (!is_near(L, bits)) return false;
3136 scratch = GetRtAsRegisterHelper(rt, scratch);
3137 offset = GetOffset(offset, L, bits);
3138 bltzc(scratch, offset);
3139 } else if (IsZero(rt)) {
3140 bits = OffsetSize::kOffset16;
3141 if (!is_near(L, bits)) return false;
3142 offset = GetOffset(offset, L, bits);
3143 bgtzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003144 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003145 bits = OffsetSize::kOffset16;
3146 if (!is_near(L, bits)) return false;
3147 scratch = GetRtAsRegisterHelper(rt, scratch);
3148 DCHECK(!rs.is(scratch));
3149 offset = GetOffset(offset, L, bits);
3150 bltc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003151 }
3152 break;
3153 case greater_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003154 // rs >= rt
3155 if (rs.code() == rt.rm_.reg_code) {
3156 bits = OffsetSize::kOffset26;
3157 if (!is_near(L, bits)) return false;
3158 offset = GetOffset(offset, L, bits);
3159 bc(offset);
3160 } else if (rs.is(zero_reg)) {
3161 bits = OffsetSize::kOffset16;
3162 if (!is_near(L, bits)) return false;
3163 scratch = GetRtAsRegisterHelper(rt, scratch);
3164 offset = GetOffset(offset, L, bits);
3165 blezc(scratch, offset);
3166 } else if (IsZero(rt)) {
3167 bits = OffsetSize::kOffset16;
3168 if (!is_near(L, bits)) return false;
3169 offset = GetOffset(offset, L, bits);
3170 bgezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003171 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003172 bits = OffsetSize::kOffset16;
3173 if (!is_near(L, bits)) return false;
3174 scratch = GetRtAsRegisterHelper(rt, scratch);
3175 DCHECK(!rs.is(scratch));
3176 offset = GetOffset(offset, L, bits);
3177 bgec(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003178 }
3179 break;
3180 case less:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003181 // rs < rt
3182 if (rs.code() == rt.rm_.reg_code) {
3183 break; // No code needs to be emitted.
3184 } else if (rs.is(zero_reg)) {
3185 bits = OffsetSize::kOffset16;
3186 if (!is_near(L, bits)) return false;
3187 scratch = GetRtAsRegisterHelper(rt, scratch);
3188 offset = GetOffset(offset, L, bits);
3189 bgtzc(scratch, offset);
3190 } else if (IsZero(rt)) {
3191 bits = OffsetSize::kOffset16;
3192 if (!is_near(L, bits)) return false;
3193 offset = GetOffset(offset, L, bits);
3194 bltzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003195 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003196 bits = OffsetSize::kOffset16;
3197 if (!is_near(L, bits)) return false;
3198 scratch = GetRtAsRegisterHelper(rt, scratch);
3199 DCHECK(!rs.is(scratch));
3200 offset = GetOffset(offset, L, bits);
3201 bltc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003202 }
3203 break;
3204 case less_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003205 // rs <= rt
3206 if (rs.code() == rt.rm_.reg_code) {
3207 bits = OffsetSize::kOffset26;
3208 if (!is_near(L, bits)) return false;
3209 offset = GetOffset(offset, L, bits);
3210 bc(offset);
3211 } else if (rs.is(zero_reg)) {
3212 bits = OffsetSize::kOffset16;
3213 if (!is_near(L, bits)) return false;
3214 scratch = GetRtAsRegisterHelper(rt, scratch);
3215 offset = GetOffset(offset, L, bits);
3216 bgezc(scratch, offset);
3217 } else if (IsZero(rt)) {
3218 bits = OffsetSize::kOffset16;
3219 if (!is_near(L, bits)) return false;
3220 offset = GetOffset(offset, L, bits);
3221 blezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003222 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003223 bits = OffsetSize::kOffset16;
3224 if (!is_near(L, bits)) return false;
3225 scratch = GetRtAsRegisterHelper(rt, scratch);
3226 DCHECK(!rs.is(scratch));
3227 offset = GetOffset(offset, L, bits);
3228 bgec(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003229 }
3230 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003231
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003232 // Unsigned comparison.
3233 case Ugreater:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003234 // rs > rt
3235 if (rs.code() == rt.rm_.reg_code) {
3236 break; // No code needs to be emitted.
3237 } else if (rs.is(zero_reg)) {
3238 bits = OffsetSize::kOffset21;
3239 if (!is_near(L, bits)) return false;
3240 scratch = GetRtAsRegisterHelper(rt, scratch);
3241 offset = GetOffset(offset, L, bits);
3242 bnezc(scratch, offset);
3243 } else if (IsZero(rt)) {
3244 bits = OffsetSize::kOffset21;
3245 if (!is_near(L, bits)) return false;
3246 offset = GetOffset(offset, L, bits);
3247 bnezc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003248 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003249 bits = OffsetSize::kOffset16;
3250 if (!is_near(L, bits)) return false;
3251 scratch = GetRtAsRegisterHelper(rt, scratch);
3252 DCHECK(!rs.is(scratch));
3253 offset = GetOffset(offset, L, bits);
3254 bltuc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003255 }
3256 break;
3257 case Ugreater_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003258 // rs >= rt
3259 if (rs.code() == rt.rm_.reg_code) {
3260 bits = OffsetSize::kOffset26;
3261 if (!is_near(L, bits)) return false;
3262 offset = GetOffset(offset, L, bits);
3263 bc(offset);
3264 } else if (rs.is(zero_reg)) {
3265 bits = OffsetSize::kOffset21;
3266 if (!is_near(L, bits)) return false;
3267 scratch = GetRtAsRegisterHelper(rt, scratch);
3268 offset = GetOffset(offset, L, bits);
3269 beqzc(scratch, offset);
3270 } else if (IsZero(rt)) {
3271 bits = OffsetSize::kOffset26;
3272 if (!is_near(L, bits)) return false;
3273 offset = GetOffset(offset, L, bits);
3274 bc(offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003275 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003276 bits = OffsetSize::kOffset16;
3277 if (!is_near(L, bits)) return false;
3278 scratch = GetRtAsRegisterHelper(rt, scratch);
3279 DCHECK(!rs.is(scratch));
3280 offset = GetOffset(offset, L, bits);
3281 bgeuc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003282 }
3283 break;
3284 case Uless:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003285 // rs < rt
3286 if (rs.code() == rt.rm_.reg_code) {
3287 break; // No code needs to be emitted.
3288 } else if (rs.is(zero_reg)) {
3289 bits = OffsetSize::kOffset21;
3290 if (!is_near(L, bits)) return false;
3291 scratch = GetRtAsRegisterHelper(rt, scratch);
3292 offset = GetOffset(offset, L, bits);
3293 bnezc(scratch, offset);
3294 } else if (IsZero(rt)) {
3295 break; // No code needs to be emitted.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003296 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003297 bits = OffsetSize::kOffset16;
3298 if (!is_near(L, bits)) return false;
3299 scratch = GetRtAsRegisterHelper(rt, scratch);
3300 DCHECK(!rs.is(scratch));
3301 offset = GetOffset(offset, L, bits);
3302 bltuc(rs, scratch, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003303 }
3304 break;
3305 case Uless_equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003306 // rs <= rt
3307 if (rs.code() == rt.rm_.reg_code) {
3308 bits = OffsetSize::kOffset26;
3309 if (!is_near(L, bits)) return false;
3310 offset = GetOffset(offset, L, bits);
3311 bc(offset);
3312 } else if (rs.is(zero_reg)) {
3313 bits = OffsetSize::kOffset26;
3314 if (!is_near(L, bits)) return false;
3315 scratch = GetRtAsRegisterHelper(rt, scratch);
3316 offset = GetOffset(offset, L, bits);
3317 bc(offset);
3318 } else if (IsZero(rt)) {
3319 bits = OffsetSize::kOffset21;
3320 if (!is_near(L, bits)) return false;
3321 offset = GetOffset(offset, L, bits);
3322 beqzc(rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003323 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003324 bits = OffsetSize::kOffset16;
3325 if (!is_near(L, bits)) return false;
3326 scratch = GetRtAsRegisterHelper(rt, scratch);
3327 DCHECK(!rs.is(scratch));
3328 offset = GetOffset(offset, L, bits);
3329 bgeuc(scratch, rs, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003330 }
3331 break;
3332 default:
3333 UNREACHABLE();
3334 }
3335 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003336 CheckTrampolinePoolQuick(1);
3337 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003338}
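// Note: the r6 helper above picks the widest offset encoding the chosen
// instruction supports (26 bits for bc, 21 bits for the beqzc/bnezc
// compare-with-zero forms, 16 bits for the two-register compact branches) and
// returns false when the label is out of that range, so the caller can fall
// back to emitting a long branch sequence instead.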
3339
3340
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003341bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3342 Register rs, const Operand& rt,
3343 BranchDelaySlot bdslot) {
3344 DCHECK(L == nullptr || offset == 0);
3345 if (!is_near(L, OffsetSize::kOffset16)) return false;
3346
3347 Register scratch = at;
3348 int32_t offset32;
3349
3350 // Be careful to always use shifted_branch_offset only just before the
3351 // branch instruction, as the location will be remembered for patching the
3352 // target.
3353 {
3354 BlockTrampolinePoolScope block_trampoline_pool(this);
3355 switch (cond) {
3356 case cc_always:
3357 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3358 b(offset32);
3359 break;
3360 case eq:
3361 if (IsZero(rt)) {
3362 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3363 beq(rs, zero_reg, offset32);
3364 } else {
3365 // We don't want any other register but scratch clobbered.
3366 scratch = GetRtAsRegisterHelper(rt, scratch);
3367 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3368 beq(rs, scratch, offset32);
3369 }
3370 break;
3371 case ne:
3372 if (IsZero(rt)) {
3373 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3374 bne(rs, zero_reg, offset32);
3375 } else {
3376 // We don't want any other register but scratch clobbered.
3377 scratch = GetRtAsRegisterHelper(rt, scratch);
3378 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3379 bne(rs, scratch, offset32);
3380 }
3381 break;
3382
3383 // Signed comparison.
3384 case greater:
3385 if (IsZero(rt)) {
3386 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3387 bgtz(rs, offset32);
3388 } else {
3389 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3390 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3391 bne(scratch, zero_reg, offset32);
3392 }
3393 break;
3394 case greater_equal:
3395 if (IsZero(rt)) {
3396 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3397 bgez(rs, offset32);
3398 } else {
3399 Slt(scratch, rs, rt);
3400 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3401 beq(scratch, zero_reg, offset32);
3402 }
3403 break;
3404 case less:
3405 if (IsZero(rt)) {
3406 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3407 bltz(rs, offset32);
3408 } else {
3409 Slt(scratch, rs, rt);
3410 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3411 bne(scratch, zero_reg, offset32);
3412 }
3413 break;
3414 case less_equal:
3415 if (IsZero(rt)) {
3416 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3417 blez(rs, offset32);
3418 } else {
3419 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3420 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3421 beq(scratch, zero_reg, offset32);
3422 }
3423 break;
3424
3425 // Unsigned comparison.
3426 case Ugreater:
3427 if (IsZero(rt)) {
3428 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3429 bne(rs, zero_reg, offset32);
3430 } else {
3431 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3432 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3433 bne(scratch, zero_reg, offset32);
3434 }
3435 break;
3436 case Ugreater_equal:
3437 if (IsZero(rt)) {
3438 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3439 b(offset32);
3440 } else {
3441 Sltu(scratch, rs, rt);
3442 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3443 beq(scratch, zero_reg, offset32);
3444 }
3445 break;
3446 case Uless:
3447 if (IsZero(rt)) {
3448 return true; // No code needs to be emitted.
3449 } else {
3450 Sltu(scratch, rs, rt);
3451 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3452 bne(scratch, zero_reg, offset32);
3453 }
3454 break;
3455 case Uless_equal:
3456 if (IsZero(rt)) {
3457 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3458 beq(rs, zero_reg, offset32);
3459 } else {
3460 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3461 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3462 beq(scratch, zero_reg, offset32);
3463 }
3464 break;
3465 default:
3466 UNREACHABLE();
3467 }
3468 }
3469
3470 // Emit a nop in the branch delay slot if required.
3471 if (bdslot == PROTECT)
3472 nop();
3473
3474 return true;
3475}
3476
3477
3478bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3479 Register rs, const Operand& rt,
3480 BranchDelaySlot bdslot) {
3481 BRANCH_ARGS_CHECK(cond, rs, rt);
3482
3483 if (!L) {
3484 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3485 DCHECK(is_int26(offset));
3486 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3487 } else {
3488 DCHECK(is_int16(offset));
3489 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3490 }
3491 } else {
3492 DCHECK(offset == 0);
3493 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3494 return BranchShortHelperR6(0, L, cond, rs, rt);
3495 } else {
3496 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3497 }
3498 }
3499 return false;
3500}
3501
3502
3503void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3504 const Operand& rt, BranchDelaySlot bdslot) {
3505 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3506}
3507
3508
3509void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3510 const Operand& rt, BranchDelaySlot bdslot) {
3511 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3512}
3513
3514
3515void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003516 BranchAndLinkShort(offset, bdslot);
3517}
3518
3519
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003520void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3521 const Operand& rt, BranchDelaySlot bdslot) {
3522 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3523 DCHECK(is_near);
3524 USE(is_near);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003525}
3526
3527
3528void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3529 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003530 if (is_near_branch(L)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003531 BranchAndLinkShort(L, bdslot);
3532 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003533 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003534 }
3535 } else {
3536 if (is_trampoline_emitted()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003537 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003538 } else {
3539 BranchAndLinkShort(L, bdslot);
3540 }
3541 }
3542}
3543
3544
3545void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3546 const Operand& rt,
3547 BranchDelaySlot bdslot) {
3548 if (L->is_bound()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003549 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003550 Label skip;
3551 Condition neg_cond = NegateCondition(cond);
3552 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003553 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003554 bind(&skip);
3555 }
3556 } else {
3557 if (is_trampoline_emitted()) {
3558 Label skip;
3559 Condition neg_cond = NegateCondition(cond);
3560 BranchShort(&skip, neg_cond, rs, rt);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003561 BranchAndLinkLong(L, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003562 bind(&skip);
3563 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003564 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003565 }
3566 }
3567}
3568
3569
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003570void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3571 BranchDelaySlot bdslot) {
3572 DCHECK(L == nullptr || offset == 0);
3573 offset = GetOffset(offset, L, OffsetSize::kOffset16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003574 bal(offset);
3575
3576 // Emit a nop in the branch delay slot if required.
3577 if (bdslot == PROTECT)
3578 nop();
3579}
3580
3581
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003582void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3583 DCHECK(L == nullptr || offset == 0);
3584 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3585 balc(offset);
3586}
3587
3588
3589void MacroAssembler::BranchAndLinkShort(int32_t offset,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003590 BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003591 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3592 DCHECK(is_int26(offset));
3593 BranchAndLinkShortHelperR6(offset, nullptr);
3594 } else {
3595 DCHECK(is_int16(offset));
3596 BranchAndLinkShortHelper(offset, nullptr, bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003597 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003598}
3599
3600
3601void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003602 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3603 BranchAndLinkShortHelperR6(0, L);
3604 } else {
3605 BranchAndLinkShortHelper(0, L, bdslot);
3606 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003607}
3608
3609
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003610bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3611 Condition cond, Register rs,
3612 const Operand& rt) {
3613 DCHECK(L == nullptr || offset == 0);
3614 Register scratch = rs.is(at) ? t8 : at;
3615 OffsetSize bits = OffsetSize::kOffset16;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003616
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003617 BlockTrampolinePoolScope block_trampoline_pool(this);
3618 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3619 switch (cond) {
3620 case cc_always:
3621 bits = OffsetSize::kOffset26;
3622 if (!is_near(L, bits)) return false;
3623 offset = GetOffset(offset, L, bits);
3624 balc(offset);
3625 break;
3626 case eq:
3627 if (!is_near(L, bits)) return false;
3628 Subu(scratch, rs, rt);
3629 offset = GetOffset(offset, L, bits);
3630 beqzalc(scratch, offset);
3631 break;
3632 case ne:
3633 if (!is_near(L, bits)) return false;
3634 Subu(scratch, rs, rt);
3635 offset = GetOffset(offset, L, bits);
3636 bnezalc(scratch, offset);
3637 break;
3638
3639 // Signed comparison.
3640 case greater:
3641 // rs > rt
3642 if (rs.code() == rt.rm_.reg_code) {
3643 break; // No code needs to be emitted.
3644 } else if (rs.is(zero_reg)) {
3645 if (!is_near(L, bits)) return false;
3646 scratch = GetRtAsRegisterHelper(rt, scratch);
3647 offset = GetOffset(offset, L, bits);
3648 bltzalc(scratch, offset);
3649 } else if (IsZero(rt)) {
3650 if (!is_near(L, bits)) return false;
3651 offset = GetOffset(offset, L, bits);
3652 bgtzalc(rs, offset);
3653 } else {
3654 if (!is_near(L, bits)) return false;
3655 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3656 offset = GetOffset(offset, L, bits);
3657 bnezalc(scratch, offset);
3658 }
3659 break;
3660 case greater_equal:
3661 // rs >= rt
3662 if (rs.code() == rt.rm_.reg_code) {
3663 bits = OffsetSize::kOffset26;
3664 if (!is_near(L, bits)) return false;
3665 offset = GetOffset(offset, L, bits);
3666 balc(offset);
3667 } else if (rs.is(zero_reg)) {
3668 if (!is_near(L, bits)) return false;
3669 scratch = GetRtAsRegisterHelper(rt, scratch);
3670 offset = GetOffset(offset, L, bits);
3671 blezalc(scratch, offset);
3672 } else if (IsZero(rt)) {
3673 if (!is_near(L, bits)) return false;
3674 offset = GetOffset(offset, L, bits);
3675 bgezalc(rs, offset);
3676 } else {
3677 if (!is_near(L, bits)) return false;
3678 Slt(scratch, rs, rt);
3679 offset = GetOffset(offset, L, bits);
3680 beqzalc(scratch, offset);
3681 }
3682 break;
3683 case less:
3684 // rs < rt
3685 if (rs.code() == rt.rm_.reg_code) {
3686 break; // No code needs to be emitted.
3687 } else if (rs.is(zero_reg)) {
3688 if (!is_near(L, bits)) return false;
3689 scratch = GetRtAsRegisterHelper(rt, scratch);
3690 offset = GetOffset(offset, L, bits);
3691 bgtzalc(scratch, offset);
3692 } else if (IsZero(rt)) {
3693 if (!is_near(L, bits)) return false;
3694 offset = GetOffset(offset, L, bits);
3695 bltzalc(rs, offset);
3696 } else {
3697 if (!is_near(L, bits)) return false;
3698 Slt(scratch, rs, rt);
3699 offset = GetOffset(offset, L, bits);
3700 bnezalc(scratch, offset);
3701 }
3702 break;
3703 case less_equal:
3704      // rs <= rt
3705 if (rs.code() == rt.rm_.reg_code) {
3706 bits = OffsetSize::kOffset26;
3707 if (!is_near(L, bits)) return false;
3708 offset = GetOffset(offset, L, bits);
3709 balc(offset);
3710 } else if (rs.is(zero_reg)) {
3711 if (!is_near(L, bits)) return false;
3712 scratch = GetRtAsRegisterHelper(rt, scratch);
3713 offset = GetOffset(offset, L, bits);
3714 bgezalc(scratch, offset);
3715 } else if (IsZero(rt)) {
3716 if (!is_near(L, bits)) return false;
3717 offset = GetOffset(offset, L, bits);
3718 blezalc(rs, offset);
3719 } else {
3720 if (!is_near(L, bits)) return false;
3721 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3722 offset = GetOffset(offset, L, bits);
3723 beqzalc(scratch, offset);
3724 }
3725 break;
3726
3728 // Unsigned comparison.
3729 case Ugreater:
3730      // rs > rt
3731 if (!is_near(L, bits)) return false;
3732 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3733 offset = GetOffset(offset, L, bits);
3734 bnezalc(scratch, offset);
3735 break;
3736 case Ugreater_equal:
3737      // rs >= rt
3738 if (!is_near(L, bits)) return false;
3739 Sltu(scratch, rs, rt);
3740 offset = GetOffset(offset, L, bits);
3741 beqzalc(scratch, offset);
3742 break;
3743 case Uless:
3744      // rs < rt
3745 if (!is_near(L, bits)) return false;
3746 Sltu(scratch, rs, rt);
3747 offset = GetOffset(offset, L, bits);
3748 bnezalc(scratch, offset);
3749 break;
3750 case Uless_equal:
3751      // rs <= rt
3752 if (!is_near(L, bits)) return false;
3753 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3754 offset = GetOffset(offset, L, bits);
3755 beqzalc(scratch, offset);
3756 break;
3757 default:
3758 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003759 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003760 return true;
3761}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003762
3763
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003764// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3765// with the slt instructions. We could use sub or add instead but we would miss
3766// overflow cases, so we keep slt and add an intermediate third instruction.
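// The slt/sltu result (0 or 1) is decremented to -1 or 0; bgezal then links
// exactly when the slt produced 1 and bltzal exactly when it produced 0.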
3767bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3768 Condition cond, Register rs,
3769 const Operand& rt,
3770 BranchDelaySlot bdslot) {
3771 DCHECK(L == nullptr || offset == 0);
3772 if (!is_near(L, OffsetSize::kOffset16)) return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003773
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003774 Register scratch = t8;
3775 BlockTrampolinePoolScope block_trampoline_pool(this);
3776
3777 switch (cond) {
3778 case cc_always:
3779 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3780 bal(offset);
3781 break;
3782 case eq:
3783 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3784 nop();
3785 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3786 bal(offset);
3787 break;
3788 case ne:
3789 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3790 nop();
3791 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3792 bal(offset);
3793 break;
3794
3795 // Signed comparison.
3796 case greater:
3797 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3798 addiu(scratch, scratch, -1);
3799 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3800 bgezal(scratch, offset);
3801 break;
3802 case greater_equal:
3803 Slt(scratch, rs, rt);
3804 addiu(scratch, scratch, -1);
3805 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3806 bltzal(scratch, offset);
3807 break;
3808 case less:
3809 Slt(scratch, rs, rt);
3810 addiu(scratch, scratch, -1);
3811 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3812 bgezal(scratch, offset);
3813 break;
3814 case less_equal:
3815 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3816 addiu(scratch, scratch, -1);
3817 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3818 bltzal(scratch, offset);
3819 break;
3820
3821 // Unsigned comparison.
3822 case Ugreater:
3823 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3824 addiu(scratch, scratch, -1);
3825 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3826 bgezal(scratch, offset);
3827 break;
3828 case Ugreater_equal:
3829 Sltu(scratch, rs, rt);
3830 addiu(scratch, scratch, -1);
3831 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3832 bltzal(scratch, offset);
3833 break;
3834 case Uless:
3835 Sltu(scratch, rs, rt);
3836 addiu(scratch, scratch, -1);
3837 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3838 bgezal(scratch, offset);
3839 break;
3840 case Uless_equal:
3841 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3842 addiu(scratch, scratch, -1);
3843 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3844 bltzal(scratch, offset);
3845 break;
3846
3847 default:
3848 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003849 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003850
3851 // Emit a nop in the branch delay slot if required.
3852 if (bdslot == PROTECT)
3853 nop();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003854
3855 return true;
3856}
3857
3858
3859bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3860 Condition cond, Register rs,
3861 const Operand& rt,
3862 BranchDelaySlot bdslot) {
3863 BRANCH_ARGS_CHECK(cond, rs, rt);
3864
3865 if (!L) {
3866 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3867 DCHECK(is_int26(offset));
3868 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3869 } else {
3870 DCHECK(is_int16(offset));
3871 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3872 }
3873 } else {
3874 DCHECK(offset == 0);
3875 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3876 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3877 } else {
3878 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3879 }
3880 }
3881 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003882}
3883
3884
3885void MacroAssembler::Jump(Register target,
3886 Condition cond,
3887 Register rs,
3888 const Operand& rt,
3889 BranchDelaySlot bd) {
3890 BlockTrampolinePoolScope block_trampoline_pool(this);
Ben Murdochda12d292016-06-02 14:46:10 +01003891 if (kArchVariant == kMips64r6 && bd == PROTECT) {
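    // On r6 with a protected slot, use the compact jump (jic), which has no
    // branch delay slot, so no trailing nop is needed.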
3892 if (cond == cc_always) {
3893 jic(target, 0);
3894 } else {
3895 BRANCH_ARGS_CHECK(cond, rs, rt);
3896 Branch(2, NegateCondition(cond), rs, rt);
3897 jic(target, 0);
3898 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003899 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01003900 if (cond == cc_always) {
3901 jr(target);
3902 } else {
3903 BRANCH_ARGS_CHECK(cond, rs, rt);
3904 Branch(2, NegateCondition(cond), rs, rt);
3905 jr(target);
3906 }
3907 // Emit a nop in the branch delay slot if required.
3908 if (bd == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003909 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003910}
3911
3912
3913void MacroAssembler::Jump(intptr_t target,
3914 RelocInfo::Mode rmode,
3915 Condition cond,
3916 Register rs,
3917 const Operand& rt,
3918 BranchDelaySlot bd) {
3919 Label skip;
3920 if (cond != cc_always) {
3921 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3922 }
3923 // The first instruction of 'li' may be placed in the delay slot.
3924 // This is not an issue, t9 is expected to be clobbered anyway.
3925 li(t9, Operand(target, rmode));
3926 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3927 bind(&skip);
3928}
3929
3930
3931void MacroAssembler::Jump(Address target,
3932 RelocInfo::Mode rmode,
3933 Condition cond,
3934 Register rs,
3935 const Operand& rt,
3936 BranchDelaySlot bd) {
3937 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3938 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3939}
3940
3941
3942void MacroAssembler::Jump(Handle<Code> code,
3943 RelocInfo::Mode rmode,
3944 Condition cond,
3945 Register rs,
3946 const Operand& rt,
3947 BranchDelaySlot bd) {
3948 DCHECK(RelocInfo::IsCodeTarget(rmode));
3949 AllowDeferredHandleDereference embedding_raw_address;
3950 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3951}
3952
3953
3954int MacroAssembler::CallSize(Register target,
3955 Condition cond,
3956 Register rs,
3957 const Operand& rt,
3958 BranchDelaySlot bd) {
3959 int size = 0;
3960
3961 if (cond == cc_always) {
3962 size += 1;
3963 } else {
3964 size += 3;
3965 }
3966
Ben Murdochda12d292016-06-02 14:46:10 +01003967 if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003968
3969 return size * kInstrSize;
3970}
3971
3972
3973// Note: To call gcc-compiled C code on MIPS, you must call through t9.
3974void MacroAssembler::Call(Register target,
3975 Condition cond,
3976 Register rs,
3977 const Operand& rt,
3978 BranchDelaySlot bd) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003979#ifdef DEBUG
3980 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3981#endif
3982
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003983 BlockTrampolinePoolScope block_trampoline_pool(this);
3984 Label start;
3985 bind(&start);
Ben Murdochda12d292016-06-02 14:46:10 +01003986 if (kArchVariant == kMips64r6 && bd == PROTECT) {
3987 if (cond == cc_always) {
3988 jialc(target, 0);
3989 } else {
3990 BRANCH_ARGS_CHECK(cond, rs, rt);
3991 Branch(2, NegateCondition(cond), rs, rt);
3992 jialc(target, 0);
3993 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003994 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01003995 if (cond == cc_always) {
3996 jalr(target);
3997 } else {
3998 BRANCH_ARGS_CHECK(cond, rs, rt);
3999 Branch(2, NegateCondition(cond), rs, rt);
4000 jalr(target);
4001 }
4002 // Emit a nop in the branch delay slot if required.
4003 if (bd == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004004 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004005
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004006#ifdef DEBUG
4007 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
4008 SizeOfCodeGeneratedSince(&start));
4009#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004010}
4011
4012
4013int MacroAssembler::CallSize(Address target,
4014 RelocInfo::Mode rmode,
4015 Condition cond,
4016 Register rs,
4017 const Operand& rt,
4018 BranchDelaySlot bd) {
4019 int size = CallSize(t9, cond, rs, rt, bd);
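  // Call(Address, ...) loads the 64-bit target into t9 with
  // li(..., ADDRESS_LOAD), which accounts for the extra 4 instructions here.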
4020 return size + 4 * kInstrSize;
4021}
4022
4023
4024void MacroAssembler::Call(Address target,
4025 RelocInfo::Mode rmode,
4026 Condition cond,
4027 Register rs,
4028 const Operand& rt,
4029 BranchDelaySlot bd) {
4030 BlockTrampolinePoolScope block_trampoline_pool(this);
4031 Label start;
4032 bind(&start);
4033 int64_t target_int = reinterpret_cast<int64_t>(target);
4034 // Must record previous source positions before the
4035  // The recorded source positions must be written out before the li()
4036  // below generates a new code target.
4037 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
4038 Call(t9, cond, rs, rt, bd);
4039 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
4040 SizeOfCodeGeneratedSince(&start));
4041}
4042
4043
4044int MacroAssembler::CallSize(Handle<Code> code,
4045 RelocInfo::Mode rmode,
4046 TypeFeedbackId ast_id,
4047 Condition cond,
4048 Register rs,
4049 const Operand& rt,
4050 BranchDelaySlot bd) {
4051 AllowDeferredHandleDereference using_raw_address;
4052 return CallSize(reinterpret_cast<Address>(code.location()),
4053 rmode, cond, rs, rt, bd);
4054}
4055
4056
4057void MacroAssembler::Call(Handle<Code> code,
4058 RelocInfo::Mode rmode,
4059 TypeFeedbackId ast_id,
4060 Condition cond,
4061 Register rs,
4062 const Operand& rt,
4063 BranchDelaySlot bd) {
4064 BlockTrampolinePoolScope block_trampoline_pool(this);
4065 Label start;
4066 bind(&start);
4067 DCHECK(RelocInfo::IsCodeTarget(rmode));
4068 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
4069 SetRecordedAstId(ast_id);
4070 rmode = RelocInfo::CODE_TARGET_WITH_ID;
4071 }
4072 AllowDeferredHandleDereference embedding_raw_address;
4073 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
4074 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
4075 SizeOfCodeGeneratedSince(&start));
4076}
4077
4078
4079void MacroAssembler::Ret(Condition cond,
4080 Register rs,
4081 const Operand& rt,
4082 BranchDelaySlot bd) {
4083 Jump(ra, cond, rs, rt, bd);
4084}
4085
4086
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004087void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
4088 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4089 (!L->is_bound() || is_near_r6(L))) {
4090 BranchShortHelperR6(0, L);
4091 } else {
4092 EmitForbiddenSlotInstruction();
4093 BlockTrampolinePoolScope block_trampoline_pool(this);
4094 {
4095 BlockGrowBufferScope block_buf_growth(this);
4096 // Buffer growth (and relocation) must be blocked for internal references
4097 // until associated instructions are emitted and available to be patched.
4098 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4099 j(L);
4100 }
4101 // Emit a nop in the branch delay slot if required.
4102 if (bdslot == PROTECT) nop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004103 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004104}
4105
4106
4107void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4108 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4109 (!L->is_bound() || is_near_r6(L))) {
4110 BranchAndLinkShortHelperR6(0, L);
4111 } else {
4112 EmitForbiddenSlotInstruction();
4113 BlockTrampolinePoolScope block_trampoline_pool(this);
4114 {
4115 BlockGrowBufferScope block_buf_growth(this);
4116 // Buffer growth (and relocation) must be blocked for internal references
4117 // until associated instructions are emitted and available to be patched.
4118 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4119 jal(L);
4120 }
4121 // Emit a nop in the branch delay slot if required.
4122 if (bdslot == PROTECT) nop();
4123 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004124}
4125
4126
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004127void MacroAssembler::DropAndRet(int drop) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004128 DCHECK(is_int16(drop * kPointerSize));
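  // The stack adjustment below executes in the delay slot of the return.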
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004129 Ret(USE_DELAY_SLOT);
4130 daddiu(sp, sp, drop * kPointerSize);
4131}
4132
4133void MacroAssembler::DropAndRet(int drop,
4134 Condition cond,
4135 Register r1,
4136 const Operand& r2) {
4137 // Both Drop and Ret need to be conditional.
4138 Label skip;
4139 if (cond != cc_always) {
4140 Branch(&skip, NegateCondition(cond), r1, r2);
4141 }
4142
4143 Drop(drop);
4144 Ret();
4145
4146 if (cond != cc_always) {
4147 bind(&skip);
4148 }
4149}
4150
4151
4152void MacroAssembler::Drop(int count,
4153 Condition cond,
4154 Register reg,
4155 const Operand& op) {
4156 if (count <= 0) {
4157 return;
4158 }
4159
4160 Label skip;
4161
4162 if (cond != al) {
4163 Branch(&skip, NegateCondition(cond), reg, op);
4164 }
4165
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004166 Daddu(sp, sp, Operand(count * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004167
4168 if (cond != al) {
4169 bind(&skip);
4170 }
4171}
4172
4173
4175void MacroAssembler::Swap(Register reg1,
4176 Register reg2,
4177 Register scratch) {
4178 if (scratch.is(no_reg)) {
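    // No scratch register available: swap in place with the three-XOR trick.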
4179 Xor(reg1, reg1, Operand(reg2));
4180 Xor(reg2, reg2, Operand(reg1));
4181 Xor(reg1, reg1, Operand(reg2));
4182 } else {
4183 mov(scratch, reg1);
4184 mov(reg1, reg2);
4185 mov(reg2, scratch);
4186 }
4187}
4188
4189
4190void MacroAssembler::Call(Label* target) {
4191 BranchAndLink(target);
4192}
4193
4194
4195void MacroAssembler::Push(Handle<Object> handle) {
4196 li(at, Operand(handle));
4197 push(at);
4198}
4199
4200
4201void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
4202 DCHECK(!src.is(scratch));
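  // Push src as two smis: first the high 32 bits, then the low 32 bits, each
  // shifted into the upper word so that they form valid smis.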
4203 mov(scratch, src);
4204 dsrl32(src, src, 0);
4205 dsll32(src, src, 0);
4206 push(src);
4207 dsll32(scratch, scratch, 0);
4208 push(scratch);
4209}
4210
4211
4212void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
4213 DCHECK(!dst.is(scratch));
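  // Reverse of PushRegisterAsTwoSmis: pop the low and high halves and
  // reassemble the original 64-bit value in dst.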
4214 pop(scratch);
4215 dsrl32(scratch, scratch, 0);
4216 pop(dst);
4217 dsrl32(dst, dst, 0);
4218 dsll32(dst, dst, 0);
4219 or_(dst, dst, scratch);
4220}
4221
4222
4223void MacroAssembler::DebugBreak() {
4224 PrepareCEntryArgs(0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004225 PrepareCEntryFunction(
4226 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004227 CEntryStub ces(isolate(), 1);
4228 DCHECK(AllowThisStubCall(&ces));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004229 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004230}
4231
4232
4233// ---------------------------------------------------------------------------
4234// Exception handling.
4235
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004236void MacroAssembler::PushStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004237 // Adjust this code if not the case.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004238 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004239 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004240
4241 // Link the current handler as the next handler.
4242 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4243 ld(a5, MemOperand(a6));
4244 push(a5);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004245
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004246 // Set this new handler as the current one.
4247 sd(sp, MemOperand(a6));
4248}
4249
4250
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004251void MacroAssembler::PopStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004252 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4253 pop(a1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004254 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
4255 kPointerSize)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004256 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4257 sd(a1, MemOperand(at));
4258}
4259
4260
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004261void MacroAssembler::Allocate(int object_size,
4262 Register result,
4263 Register scratch1,
4264 Register scratch2,
4265 Label* gc_required,
4266 AllocationFlags flags) {
4267 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4268 if (!FLAG_inline_new) {
4269 if (emit_debug_code()) {
4270 // Trash the registers to simulate an allocation failure.
4271 li(result, 0x7091);
4272 li(scratch1, 0x7191);
4273 li(scratch2, 0x7291);
4274 }
4275 jmp(gc_required);
4276 return;
4277 }
4278
Ben Murdoch097c5b22016-05-18 11:27:45 +01004279 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004280
4281 // Make object size into bytes.
4282 if ((flags & SIZE_IN_WORDS) != 0) {
4283 object_size *= kPointerSize;
4284 }
4285 DCHECK(0 == (object_size & kObjectAlignmentMask));
4286
4287 // Check relative positions of allocation top and limit addresses.
4288 // ARM adds additional checks to make sure the ldm instruction can be
4289 // used. On MIPS we don't have ldm so we don't need additional checks either.
4290 ExternalReference allocation_top =
4291 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4292 ExternalReference allocation_limit =
4293 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4294
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004295 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4296 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004297 DCHECK((limit - top) == kPointerSize);
4298
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004299 // Set up allocation top address and allocation limit registers.
4300 Register top_address = scratch1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004301 // This code stores a temporary value in t9.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004302 Register alloc_limit = t9;
4303 Register result_end = scratch2;
4304 li(top_address, Operand(allocation_top));
4305
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004306 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004307 // Load allocation top into result and allocation limit into alloc_limit.
4308 ld(result, MemOperand(top_address));
4309 ld(alloc_limit, MemOperand(top_address, kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004310 } else {
4311 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004312 // Assert that result actually contains top on entry.
4313 ld(alloc_limit, MemOperand(top_address));
4314 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004315 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004316 // Load allocation limit. Result already contains allocation top.
4317 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004318 }
4319
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004320 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4321  // the same alignment on MIPS64.
4322 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4323
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004324 if (emit_debug_code()) {
4325 And(at, result, Operand(kDoubleAlignmentMask));
4326 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4327 }
4328
4329 // Calculate new top and bail out if new space is exhausted. Use result
4330 // to calculate the new top.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004331 Daddu(result_end, result, Operand(object_size));
4332 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004333
Ben Murdochc5610432016-08-08 18:44:38 +01004334 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4335 // The top pointer is not updated for allocation folding dominators.
4336 sd(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004337 }
Ben Murdochc5610432016-08-08 18:44:38 +01004338
4339 // Tag object.
4340 Daddu(result, result, Operand(kHeapObjectTag));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004341}
4342
4343
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004344void MacroAssembler::Allocate(Register object_size, Register result,
4345 Register result_end, Register scratch,
4346 Label* gc_required, AllocationFlags flags) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004347 if (!FLAG_inline_new) {
4348 if (emit_debug_code()) {
4349 // Trash the registers to simulate an allocation failure.
4350 li(result, 0x7091);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004351 li(scratch, 0x7191);
4352 li(result_end, 0x7291);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004353 }
4354 jmp(gc_required);
4355 return;
4356 }
4357
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004358 // |object_size| and |result_end| may overlap, other registers must not.
Ben Murdoch097c5b22016-05-18 11:27:45 +01004359 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4360 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004361
4362 // Check relative positions of allocation top and limit addresses.
4363 // ARM adds additional checks to make sure the ldm instruction can be
4364 // used. On MIPS we don't have ldm so we don't need additional checks either.
4365 ExternalReference allocation_top =
4366 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4367 ExternalReference allocation_limit =
4368 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004369 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4370 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004371 DCHECK((limit - top) == kPointerSize);
4372
4373 // Set up allocation top address and object size registers.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004374 Register top_address = scratch;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004375 // This code stores a temporary value in t9.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004376 Register alloc_limit = t9;
4377 li(top_address, Operand(allocation_top));
4378
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004379 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004380 // Load allocation top into result and allocation limit into alloc_limit.
4381 ld(result, MemOperand(top_address));
4382 ld(alloc_limit, MemOperand(top_address, kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004383 } else {
4384 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004385 // Assert that result actually contains top on entry.
4386 ld(alloc_limit, MemOperand(top_address));
4387 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004388 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004389 // Load allocation limit. Result already contains allocation top.
4390 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004391 }
4392
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004393 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4394  // the same alignment on MIPS64.
4395 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4396
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004397 if (emit_debug_code()) {
4398 And(at, result, Operand(kDoubleAlignmentMask));
4399 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4400 }
4401
4402 // Calculate new top and bail out if new space is exhausted. Use result
4403 // to calculate the new top. Object size may be in words so a shift is
4404 // required to get the number of bytes.
4405 if ((flags & SIZE_IN_WORDS) != 0) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004406 Dlsa(result_end, result, object_size, kPointerSizeLog2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004407 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004408 Daddu(result_end, result, Operand(object_size));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004409 }
Ben Murdochc5610432016-08-08 18:44:38 +01004410
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004411 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004412
4413 // Update allocation top. result temporarily holds the new top.
4414 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004415 And(at, result_end, Operand(kObjectAlignmentMask));
4416 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004417 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004418
Ben Murdochc5610432016-08-08 18:44:38 +01004419 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4420 // The top pointer is not updated for allocation folding dominators.
4421 sd(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004422 }
Ben Murdochc5610432016-08-08 18:44:38 +01004423
4424  // Tag object.
4425 Daddu(result, result, Operand(kHeapObjectTag));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004426}
4427
Ben Murdochc5610432016-08-08 18:44:38 +01004428void MacroAssembler::FastAllocate(int object_size, Register result,
4429 Register scratch1, Register scratch2,
4430 AllocationFlags flags) {
4431 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4432 DCHECK(!AreAliased(result, scratch1, scratch2, at));
4433
4434 // Make object size into bytes.
4435 if ((flags & SIZE_IN_WORDS) != 0) {
4436 object_size *= kPointerSize;
4437 }
4438 DCHECK(0 == (object_size & kObjectAlignmentMask));
4439
4440 ExternalReference allocation_top =
4441 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4442
4443 Register top_address = scratch1;
4444 Register result_end = scratch2;
4445 li(top_address, Operand(allocation_top));
4446 ld(result, MemOperand(top_address));
4447
4448 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4449 // the same alignment on MIPS64.
4450 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4451
4452 if (emit_debug_code()) {
4453 And(at, result, Operand(kDoubleAlignmentMask));
4454 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4455 }
4456
4457 // Calculate new top and write it back.
4458 Daddu(result_end, result, Operand(object_size));
4459 sd(result_end, MemOperand(top_address));
4460
4461 Daddu(result, result, Operand(kHeapObjectTag));
4462}
4463
4464void MacroAssembler::FastAllocate(Register object_size, Register result,
4465 Register result_end, Register scratch,
4466 AllocationFlags flags) {
4467 // |object_size| and |result_end| may overlap, other registers must not.
4468 DCHECK(!AreAliased(object_size, result, scratch, at));
4469 DCHECK(!AreAliased(result_end, result, scratch, at));
4470
4471 ExternalReference allocation_top =
4472 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4473
4474 // Set up allocation top address and object size registers.
4475 Register top_address = scratch;
4476 li(top_address, Operand(allocation_top));
4477 ld(result, MemOperand(top_address));
4478
4479 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4480 // the same alignment on MIPS64.
4481 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4482
4483 if (emit_debug_code()) {
4484 And(at, result, Operand(kDoubleAlignmentMask));
4485 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4486 }
4487
4488 // Calculate new top and write it back
4489 if ((flags & SIZE_IN_WORDS) != 0) {
4490 Dlsa(result_end, result, object_size, kPointerSizeLog2);
4491 } else {
4492 Daddu(result_end, result, Operand(object_size));
4493 }
4494
4495 // Update allocation top. result temporarily holds the new top.
4496 if (emit_debug_code()) {
4497 And(at, result_end, Operand(kObjectAlignmentMask));
4498 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4499 }
4500
4501 Daddu(result, result, Operand(kHeapObjectTag));
4502}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004503
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004504void MacroAssembler::AllocateTwoByteString(Register result,
4505 Register length,
4506 Register scratch1,
4507 Register scratch2,
4508 Register scratch3,
4509 Label* gc_required) {
4510 // Calculate the number of bytes needed for the characters in the string while
4511 // observing object alignment.
4512 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4513 dsll(scratch1, length, 1); // Length in bytes, not chars.
4514 daddiu(scratch1, scratch1,
4515 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4516 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4517
4518 // Allocate two-byte string in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01004519 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4520 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004521
4522 // Set the map, length and hash field.
4523 InitializeNewString(result,
4524 length,
4525 Heap::kStringMapRootIndex,
4526 scratch1,
4527 scratch2);
4528}
4529
4530
4531void MacroAssembler::AllocateOneByteString(Register result, Register length,
4532 Register scratch1, Register scratch2,
4533 Register scratch3,
4534 Label* gc_required) {
4535 // Calculate the number of bytes needed for the characters in the string
4536 // while observing object alignment.
4537 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4538 DCHECK(kCharSize == 1);
4539 daddiu(scratch1, length,
4540 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4541 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4542
4543 // Allocate one-byte string in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01004544 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4545 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004546
4547 // Set the map, length and hash field.
4548 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4549 scratch1, scratch2);
4550}
4551
4552
4553void MacroAssembler::AllocateTwoByteConsString(Register result,
4554 Register length,
4555 Register scratch1,
4556 Register scratch2,
4557 Label* gc_required) {
4558 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01004559 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004560 InitializeNewString(result,
4561 length,
4562 Heap::kConsStringMapRootIndex,
4563 scratch1,
4564 scratch2);
4565}
4566
4567
4568void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4569 Register scratch1,
4570 Register scratch2,
4571 Label* gc_required) {
Ben Murdochc5610432016-08-08 18:44:38 +01004572 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4573 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004574
4575 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4576 scratch1, scratch2);
4577}
4578
4579
4580void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4581 Register length,
4582 Register scratch1,
4583 Register scratch2,
4584 Label* gc_required) {
4585 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01004586 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004587
4588 InitializeNewString(result,
4589 length,
4590 Heap::kSlicedStringMapRootIndex,
4591 scratch1,
4592 scratch2);
4593}
4594
4595
4596void MacroAssembler::AllocateOneByteSlicedString(Register result,
4597 Register length,
4598 Register scratch1,
4599 Register scratch2,
4600 Label* gc_required) {
4601 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01004602 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004603
4604 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4605 scratch1, scratch2);
4606}
4607
4608
4609void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4610 Label* not_unique_name) {
4611 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4612 Label succeed;
4613 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4614 Branch(&succeed, eq, at, Operand(zero_reg));
4615 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4616
4617 bind(&succeed);
4618}
4619
4620
4621// Allocates a heap number or jumps to the label if the young space is full and
4622// a scavenge is needed.
4623void MacroAssembler::AllocateHeapNumber(Register result,
4624 Register scratch1,
4625 Register scratch2,
4626 Register heap_number_map,
4627 Label* need_gc,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004628 MutableMode mode) {
4629 // Allocate an object in the heap for the heap number and tag it as a heap
4630 // object.
4631 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
Ben Murdochc5610432016-08-08 18:44:38 +01004632 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004633
4634 Heap::RootListIndex map_index = mode == MUTABLE
4635 ? Heap::kMutableHeapNumberMapRootIndex
4636 : Heap::kHeapNumberMapRootIndex;
4637 AssertIsRoot(heap_number_map, map_index);
4638
4639 // Store heap number map in the allocated object.
Ben Murdochc5610432016-08-08 18:44:38 +01004640 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004641}
4642
4643
4644void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4645 FPURegister value,
4646 Register scratch1,
4647 Register scratch2,
4648 Label* gc_required) {
4649 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4650 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4651 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4652}
4653
4654
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004655void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4656 Register value, Register scratch1,
4657 Register scratch2, Label* gc_required) {
4658 DCHECK(!result.is(constructor));
4659 DCHECK(!result.is(scratch1));
4660 DCHECK(!result.is(scratch2));
4661 DCHECK(!result.is(value));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004662
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004663 // Allocate JSValue in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01004664 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4665 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004666
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004667 // Initialize the JSValue.
4668 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4669 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4670 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4671 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4672 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4673 sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4674 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004675}
4676
4677
4678void MacroAssembler::CopyBytes(Register src,
4679 Register dst,
4680 Register length,
4681 Register scratch) {
4682 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
4683
4684 // Align src before copying in word size chunks.
4685 Branch(&byte_loop, le, length, Operand(kPointerSize));
4686 bind(&align_loop_1);
4687 And(scratch, src, kPointerSize - 1);
4688 Branch(&word_loop, eq, scratch, Operand(zero_reg));
4689 lbu(scratch, MemOperand(src));
4690 Daddu(src, src, 1);
4691 sb(scratch, MemOperand(dst));
4692 Daddu(dst, dst, 1);
4693 Dsubu(length, length, Operand(1));
4694 Branch(&align_loop_1, ne, length, Operand(zero_reg));
4695
4696 // Copy bytes in word size chunks.
4697 bind(&word_loop);
4698 if (emit_debug_code()) {
4699 And(scratch, src, kPointerSize - 1);
4700 Assert(eq, kExpectingAlignmentForCopyBytes,
4701 scratch, Operand(zero_reg));
4702 }
4703 Branch(&byte_loop, lt, length, Operand(kPointerSize));
4704 ld(scratch, MemOperand(src));
4705 Daddu(src, src, kPointerSize);
4706
4707 // TODO(kalmard) check if this can be optimized to use sw in most cases.
4708 // Can't use unaligned access - copy byte by byte.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004709 if (kArchEndian == kLittle) {
4710 sb(scratch, MemOperand(dst, 0));
4711 dsrl(scratch, scratch, 8);
4712 sb(scratch, MemOperand(dst, 1));
4713 dsrl(scratch, scratch, 8);
4714 sb(scratch, MemOperand(dst, 2));
4715 dsrl(scratch, scratch, 8);
4716 sb(scratch, MemOperand(dst, 3));
4717 dsrl(scratch, scratch, 8);
4718 sb(scratch, MemOperand(dst, 4));
4719 dsrl(scratch, scratch, 8);
4720 sb(scratch, MemOperand(dst, 5));
4721 dsrl(scratch, scratch, 8);
4722 sb(scratch, MemOperand(dst, 6));
4723 dsrl(scratch, scratch, 8);
4724 sb(scratch, MemOperand(dst, 7));
4725 } else {
4726 sb(scratch, MemOperand(dst, 7));
4727 dsrl(scratch, scratch, 8);
4728 sb(scratch, MemOperand(dst, 6));
4729 dsrl(scratch, scratch, 8);
4730 sb(scratch, MemOperand(dst, 5));
4731 dsrl(scratch, scratch, 8);
4732 sb(scratch, MemOperand(dst, 4));
4733 dsrl(scratch, scratch, 8);
4734 sb(scratch, MemOperand(dst, 3));
4735 dsrl(scratch, scratch, 8);
4736 sb(scratch, MemOperand(dst, 2));
4737 dsrl(scratch, scratch, 8);
4738 sb(scratch, MemOperand(dst, 1));
4739 dsrl(scratch, scratch, 8);
4740 sb(scratch, MemOperand(dst, 0));
4741 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004742 Daddu(dst, dst, 8);
4743
4744 Dsubu(length, length, Operand(kPointerSize));
4745 Branch(&word_loop);
4746
4747 // Copy the last bytes if any left.
4748 bind(&byte_loop);
4749 Branch(&done, eq, length, Operand(zero_reg));
4750 bind(&byte_loop_1);
4751 lbu(scratch, MemOperand(src));
4752 Daddu(src, src, 1);
4753 sb(scratch, MemOperand(dst));
4754 Daddu(dst, dst, 1);
4755 Dsubu(length, length, Operand(1));
4756 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
4757 bind(&done);
4758}
4759
4760
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004761void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4762 Register end_address,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004763 Register filler) {
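  // Store the filler word at [current_address, end_address), advancing one
  // pointer at a time.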
4764 Label loop, entry;
4765 Branch(&entry);
4766 bind(&loop);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004767 sd(filler, MemOperand(current_address));
4768 Daddu(current_address, current_address, kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004769 bind(&entry);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004770 Branch(&loop, ult, current_address, Operand(end_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004771}
4772
4773
4774void MacroAssembler::CheckFastElements(Register map,
4775 Register scratch,
4776 Label* fail) {
4777 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4778 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4779 STATIC_ASSERT(FAST_ELEMENTS == 2);
4780 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4781 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4782 Branch(fail, hi, scratch,
4783 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4784}
4785
4786
4787void MacroAssembler::CheckFastObjectElements(Register map,
4788 Register scratch,
4789 Label* fail) {
4790 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4791 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4792 STATIC_ASSERT(FAST_ELEMENTS == 2);
4793 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4794 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4795 Branch(fail, ls, scratch,
4796 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4797 Branch(fail, hi, scratch,
4798 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4799}
4800
4801
4802void MacroAssembler::CheckFastSmiElements(Register map,
4803 Register scratch,
4804 Label* fail) {
4805 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4806 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4807 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4808 Branch(fail, hi, scratch,
4809 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4810}
4811
4812
4813void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4814 Register key_reg,
4815 Register elements_reg,
4816 Register scratch1,
4817 Register scratch2,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004818 Label* fail,
4819 int elements_offset) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004820 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
4821 Label smi_value, done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004822
4823 // Handle smi values specially.
4824 JumpIfSmi(value_reg, &smi_value);
4825
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004826 // Ensure that the object is a heap number.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004827 CheckMap(value_reg,
4828 scratch1,
4829 Heap::kHeapNumberMapRootIndex,
4830 fail,
4831 DONT_DO_SMI_CHECK);
4832
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004833 // Double value, turn potential sNaN into qNan.
4834 DoubleRegister double_result = f0;
4835 DoubleRegister double_scratch = f2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004836
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004837 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4838 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4839 FPUCanonicalizeNaN(double_result, double_result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004840
4841 bind(&smi_value);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004842 // Untag and transfer.
4843 dsrl32(scratch1, value_reg, 0);
4844 mtc1(scratch1, double_scratch);
4845 cvt_d_w(double_result, double_scratch);
4846
4847 bind(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004848 Daddu(scratch1, elements_reg,
4849 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4850 elements_offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004851 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
4852 Daddu(scratch1, scratch1, scratch2);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004853 // scratch1 is now effective address of the double element.
4854 sdc1(double_result, MemOperand(scratch1, 0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004855}
4856
4857
4858void MacroAssembler::CompareMapAndBranch(Register obj,
4859 Register scratch,
4860 Handle<Map> map,
4861 Label* early_success,
4862 Condition cond,
4863 Label* branch_to) {
4864 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4865 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4866}
4867
4868
4869void MacroAssembler::CompareMapAndBranch(Register obj_map,
4870 Handle<Map> map,
4871 Label* early_success,
4872 Condition cond,
4873 Label* branch_to) {
4874 Branch(branch_to, cond, obj_map, Operand(map));
4875}
4876
4877
4878void MacroAssembler::CheckMap(Register obj,
4879 Register scratch,
4880 Handle<Map> map,
4881 Label* fail,
4882 SmiCheckType smi_check_type) {
4883 if (smi_check_type == DO_SMI_CHECK) {
4884 JumpIfSmi(obj, fail);
4885 }
4886 Label success;
4887 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4888 bind(&success);
4889}
4890
4891
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004892void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4893 Register scratch2, Handle<WeakCell> cell,
4894 Handle<Code> success,
4895 SmiCheckType smi_check_type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004896 Label fail;
4897 if (smi_check_type == DO_SMI_CHECK) {
4898 JumpIfSmi(obj, &fail);
4899 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004900 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4901 GetWeakValue(scratch2, cell);
4902 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004903 bind(&fail);
4904}
4905
4906
4907void MacroAssembler::CheckMap(Register obj,
4908 Register scratch,
4909 Heap::RootListIndex index,
4910 Label* fail,
4911 SmiCheckType smi_check_type) {
4912 if (smi_check_type == DO_SMI_CHECK) {
4913 JumpIfSmi(obj, fail);
4914 }
4915 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4916 LoadRoot(at, index);
4917 Branch(fail, ne, scratch, Operand(at));
4918}
4919
4920
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004921void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4922 li(value, Operand(cell));
4923 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4924}
4925
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004926void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4927 const DoubleRegister src) {
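  // Subtracting zero turns a signalling NaN into a quiet NaN and leaves all
  // other values unchanged.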
4928 sub_d(dst, src, kDoubleRegZero);
4929}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004930
4931void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4932 Label* miss) {
4933 GetWeakValue(value, cell);
4934 JumpIfSmi(value, miss);
4935}
4936
4937
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004938void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4939 if (IsMipsSoftFloatABI) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004940 if (kArchEndian == kLittle) {
4941 Move(dst, v0, v1);
4942 } else {
4943 Move(dst, v1, v0);
4944 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004945 } else {
4946 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4947 }
4948}
4949
4950
4951void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4952 if (IsMipsSoftFloatABI) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004953 if (kArchEndian == kLittle) {
4954 Move(dst, a0, a1);
4955 } else {
4956 Move(dst, a1, a0);
4957 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004958 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004959 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004960 }
4961}
4962
4963
4964void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4965 if (!IsMipsSoftFloatABI) {
4966 Move(f12, src);
4967 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004968 if (kArchEndian == kLittle) {
4969 Move(a0, a1, src);
4970 } else {
4971 Move(a1, a0, src);
4972 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004973 }
4974}
4975
4976
4977void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4978 if (!IsMipsSoftFloatABI) {
4979 Move(f0, src);
4980 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004981 if (kArchEndian == kLittle) {
4982 Move(v0, v1, src);
4983 } else {
4984 Move(v1, v0, src);
4985 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004986 }
4987}
4988
4989
4990void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4991 DoubleRegister src2) {
4992 if (!IsMipsSoftFloatABI) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004993 const DoubleRegister fparg2 = f13;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004994 if (src2.is(f12)) {
4995 DCHECK(!src1.is(fparg2));
4996 Move(fparg2, src2);
4997 Move(f12, src1);
4998 } else {
4999 Move(f12, src1);
5000 Move(fparg2, src2);
5001 }
5002 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005003 if (kArchEndian == kLittle) {
5004 Move(a0, a1, src1);
5005 Move(a2, a3, src2);
5006 } else {
5007 Move(a1, a0, src1);
5008 Move(a3, a2, src2);
5009 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005010 }
5011}
5012
5013
5014// -----------------------------------------------------------------------------
5015// JavaScript invokes.
5016
Ben Murdochda12d292016-06-02 14:46:10 +01005017void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
5018 Register caller_args_count_reg,
5019 Register scratch0, Register scratch1) {
5020#if DEBUG
5021 if (callee_args_count.is_reg()) {
5022 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
5023 scratch1));
5024 } else {
5025 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
5026 }
5027#endif
5028
5029  // Calculate the end of the destination area where we will put the
5030  // arguments after we drop the current frame. We add kPointerSize to
5031  // count the receiver argument, which is not included in the formal
5032  // parameter count.
5032 Register dst_reg = scratch0;
5033 Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
5034 Daddu(dst_reg, dst_reg,
5035 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
5036
5037 Register src_reg = caller_args_count_reg;
5038 // Calculate the end of source area. +kPointerSize is for the receiver.
5039 if (callee_args_count.is_reg()) {
5040 Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
5041 Daddu(src_reg, src_reg, Operand(kPointerSize));
5042 } else {
5043 Daddu(src_reg, sp,
5044 Operand((callee_args_count.immediate() + 1) * kPointerSize));
5045 }
5046
5047 if (FLAG_debug_code) {
5048 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
5049 }
5050
5051 // Restore caller's frame pointer and return address now as they will be
5052 // overwritten by the copying loop.
5053 ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
5054 ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5055
5056  // Now copy the callee arguments to the caller frame, going backwards to
5057  // avoid corrupting them (the source and destination areas could overlap).
5058
5059 // Both src_reg and dst_reg are pointing to the word after the one to copy,
5060 // so they must be pre-decremented in the loop.
5061 Register tmp_reg = scratch1;
5062 Label loop, entry;
5063 Branch(&entry);
5064 bind(&loop);
5065 Dsubu(src_reg, src_reg, Operand(kPointerSize));
5066 Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
5067 ld(tmp_reg, MemOperand(src_reg));
5068 sd(tmp_reg, MemOperand(dst_reg));
5069 bind(&entry);
5070 Branch(&loop, ne, sp, Operand(src_reg));
5071
5072 // Leave current frame.
5073 mov(sp, dst_reg);
5074}
5075
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005076void MacroAssembler::InvokePrologue(const ParameterCount& expected,
5077 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005078 Label* done,
5079 bool* definitely_mismatches,
5080 InvokeFlag flag,
5081 const CallWrapper& call_wrapper) {
5082 bool definitely_matches = false;
5083 *definitely_mismatches = false;
5084 Label regular_invoke;
5085
5086 // Check whether the expected and actual arguments count match. If not,
5087 // setup registers according to contract with ArgumentsAdaptorTrampoline:
5088 // a0: actual arguments count
5089 // a1: function (passed through to callee)
5090 // a2: expected arguments count
5091
5092 // The code below is made a lot easier because the calling code already sets
5093 // up actual and expected registers according to the contract if values are
5094 // passed in registers.
5095 DCHECK(actual.is_immediate() || actual.reg().is(a0));
5096 DCHECK(expected.is_immediate() || expected.reg().is(a2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005097
5098 if (expected.is_immediate()) {
5099 DCHECK(actual.is_immediate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005100 li(a0, Operand(actual.immediate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005101 if (expected.immediate() == actual.immediate()) {
5102 definitely_matches = true;
5103 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005104 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
5105 if (expected.immediate() == sentinel) {
5106 // Don't worry about adapting arguments for builtins that
5107 // don't want that done. Skip adaptation code by making it look
5108 // like we have a match between expected and actual number of
5109 // arguments.
5110 definitely_matches = true;
5111 } else {
5112 *definitely_mismatches = true;
5113 li(a2, Operand(expected.immediate()));
5114 }
5115 }
5116 } else if (actual.is_immediate()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005117 li(a0, Operand(actual.immediate()));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005118 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005119 } else {
5120 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
5121 }
5122
5123 if (!definitely_matches) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005124 Handle<Code> adaptor =
5125 isolate()->builtins()->ArgumentsAdaptorTrampoline();
5126 if (flag == CALL_FUNCTION) {
5127 call_wrapper.BeforeCall(CallSize(adaptor));
5128 Call(adaptor);
5129 call_wrapper.AfterCall();
5130 if (!*definitely_mismatches) {
5131 Branch(done);
5132 }
5133 } else {
5134 Jump(adaptor, RelocInfo::CODE_TARGET);
5135 }
5136 bind(&regular_invoke);
5137 }
5138}
5139
5140
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005141void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
5142 const ParameterCount& expected,
5143 const ParameterCount& actual) {
5144 Label skip_flooding;
5145 ExternalReference step_in_enabled =
5146 ExternalReference::debug_step_in_enabled_address(isolate());
5147 li(t0, Operand(step_in_enabled));
5148 lb(t0, MemOperand(t0));
5149 Branch(&skip_flooding, eq, t0, Operand(zero_reg));
5150 {
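// Smi-tag and push the argument-count registers that are live so their values
// survive the runtime call below (as Smis they are also valid tagged values
// for the GC to scan).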
5151 FrameScope frame(this,
5152 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
5153 if (expected.is_reg()) {
5154 SmiTag(expected.reg());
5155 Push(expected.reg());
5156 }
5157 if (actual.is_reg()) {
5158 SmiTag(actual.reg());
5159 Push(actual.reg());
5160 }
5161 if (new_target.is_valid()) {
5162 Push(new_target);
5163 }
5164 Push(fun);
5165 Push(fun);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005166 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005167 Pop(fun);
5168 if (new_target.is_valid()) {
5169 Pop(new_target);
5170 }
5171 if (actual.is_reg()) {
5172 Pop(actual.reg());
5173 SmiUntag(actual.reg());
5174 }
5175 if (expected.is_reg()) {
5176 Pop(expected.reg());
5177 SmiUntag(expected.reg());
5178 }
5179 }
5180 bind(&skip_flooding);
5181}
5182
5183
5184void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
5185 const ParameterCount& expected,
5186 const ParameterCount& actual,
5187 InvokeFlag flag,
5188 const CallWrapper& call_wrapper) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005189 // You can't call a function without a valid frame.
5190 DCHECK(flag == JUMP_FUNCTION || has_frame());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005191 DCHECK(function.is(a1));
5192 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
5193
5194 if (call_wrapper.NeedsDebugStepCheck()) {
5195 FloodFunctionIfStepping(function, new_target, expected, actual);
5196 }
5197
5198 // Clear the new.target register if not given.
5199 if (!new_target.is_valid()) {
5200 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
5201 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005202
5203 Label done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005204 bool definitely_mismatches = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005205 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005206 call_wrapper);
5207 if (!definitely_mismatches) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005208 // We call indirectly through the code field in the function to
5209 // allow recompilation to take effect without changing any of the
5210 // call sites.
5211 Register code = t0;
5212 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
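// Note: the code-entry field holds the raw entry address of the function's
// code rather than a tagged Code object, so it can be called directly.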
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005213 if (flag == CALL_FUNCTION) {
5214 call_wrapper.BeforeCall(CallSize(code));
5215 Call(code);
5216 call_wrapper.AfterCall();
5217 } else {
5218 DCHECK(flag == JUMP_FUNCTION);
5219 Jump(code);
5220 }
5221 // Continue here if InvokePrologue handled the invocation itself due to
5222 // mismatched parameter counts.
5223 bind(&done);
5224 }
5225}
5226
5227
5228void MacroAssembler::InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005229 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005230 const ParameterCount& actual,
5231 InvokeFlag flag,
5232 const CallWrapper& call_wrapper) {
5233 // You can't call a function without a valid frame.
5234 DCHECK(flag == JUMP_FUNCTION || has_frame());
5235
5236 // Contract with called JS functions requires that function is passed in a1.
5237 DCHECK(function.is(a1));
5238 Register expected_reg = a2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005239 Register temp_reg = t0;
5240 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005241 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5242 // The argument count is stored as int32_t on 64-bit platforms.
5243 // TODO(plind): Smi on 32-bit platforms.
5244 lw(expected_reg,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005245 FieldMemOperand(temp_reg,
5246 SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005247 ParameterCount expected(expected_reg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005248 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005249}
5250
5251
5252void MacroAssembler::InvokeFunction(Register function,
5253 const ParameterCount& expected,
5254 const ParameterCount& actual,
5255 InvokeFlag flag,
5256 const CallWrapper& call_wrapper) {
5257 // You can't call a function without a valid frame.
5258 DCHECK(flag == JUMP_FUNCTION || has_frame());
5259
5260 // Contract with called JS functions requires that function is passed in a1.
5261 DCHECK(function.is(a1));
5262
5263 // Get the function and setup the context.
5264 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5265
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005266 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005267}
5268
5269
5270void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
5271 const ParameterCount& expected,
5272 const ParameterCount& actual,
5273 InvokeFlag flag,
5274 const CallWrapper& call_wrapper) {
5275 li(a1, function);
5276 InvokeFunction(a1, expected, actual, flag, call_wrapper);
5277}
5278
5279
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005280void MacroAssembler::IsObjectJSStringType(Register object,
5281 Register scratch,
5282 Label* fail) {
5283 DCHECK(kNotStringTag != 0);
5284
5285 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5286 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5287 And(scratch, scratch, Operand(kIsNotStringMask));
5288 Branch(fail, ne, scratch, Operand(zero_reg));
5289}
5290
5291
5292void MacroAssembler::IsObjectNameType(Register object,
5293 Register scratch,
5294 Label* fail) {
5295 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5296 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5297 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
5298}
5299
5300
5301// ---------------------------------------------------------------------------
5302// Support functions.
5303
5304
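// Follows the map's constructor-or-back-pointer chain: as long as the slot
// still holds a Map (i.e. a back pointer), keep walking; the first non-map
// value found is left in |result| as the constructor.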
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005305void MacroAssembler::GetMapConstructor(Register result, Register map,
5306 Register temp, Register temp2) {
5307 Label done, loop;
5308 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
5309 bind(&loop);
5310 JumpIfSmi(result, &done);
5311 GetObjectType(result, temp, temp2);
5312 Branch(&done, ne, temp2, Operand(MAP_TYPE));
5313 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
5314 Branch(&loop);
5315 bind(&done);
5316}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005317
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005318
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005319void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
5320 Register scratch, Label* miss) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005321 // Get the prototype or initial map from the function.
5322 ld(result,
5323 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5324
5325 // If the prototype or initial map is the hole, don't return it and
5326 // simply miss the cache instead. This will allow us to allocate a
5327 // prototype object on-demand in the runtime system.
5328 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
5329 Branch(miss, eq, result, Operand(t8));
5330
5331 // If the function does not have an initial map, we're done.
5332 Label done;
5333 GetObjectType(result, scratch, scratch);
5334 Branch(&done, ne, scratch, Operand(MAP_TYPE));
5335
5336 // Get the prototype from the initial map.
5337 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
5338
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005339 // All done.
5340 bind(&done);
5341}
5342
5343
5344void MacroAssembler::GetObjectType(Register object,
5345 Register map,
5346 Register type_reg) {
5347 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
5348 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5349}
5350
5351
5352// -----------------------------------------------------------------------------
5353// Runtime calls.
5354
5355void MacroAssembler::CallStub(CodeStub* stub,
5356 TypeFeedbackId ast_id,
5357 Condition cond,
5358 Register r1,
5359 const Operand& r2,
5360 BranchDelaySlot bd) {
5361 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
5362 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
5363 cond, r1, r2, bd);
5364}
5365
5366
5367void MacroAssembler::TailCallStub(CodeStub* stub,
5368 Condition cond,
5369 Register r1,
5370 const Operand& r2,
5371 BranchDelaySlot bd) {
5372 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
5373}
5374
5375
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005376bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
5377 return has_frame_ || !stub->SometimesSetsUpAFrame();
5378}
5379
5380
5381void MacroAssembler::IndexFromHash(Register hash, Register index) {
5382 // If the hash field contains an array index, pick it out. The assert checks
5383 // that the constants for the maximum number of digits for an array index
5384 // cached in the hash field and the number of bits reserved for it do not
5385 // conflict.
5386 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
5387 (1 << String::kArrayIndexValueBits));
5388 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
5389}
5390
5391
5392void MacroAssembler::ObjectToDoubleFPURegister(Register object,
5393 FPURegister result,
5394 Register scratch1,
5395 Register scratch2,
5396 Register heap_number_map,
5397 Label* not_number,
5398 ObjectToDoubleFlags flags) {
5399 Label done;
5400 if ((flags & OBJECT_NOT_SMI) == 0) {
5401 Label not_smi;
5402 JumpIfNotSmi(object, &not_smi);
5403 // Remove smi tag and convert to double.
5404 // dsra(scratch1, object, kSmiTagSize);
5405 dsra32(scratch1, object, 0);
5406 mtc1(scratch1, result);
5407 cvt_d_w(result, result);
5408 Branch(&done);
5409 bind(&not_smi);
5410 }
5411 // Check for heap number and load double value from it.
5412 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
5413 Branch(not_number, ne, scratch1, Operand(heap_number_map));
5414
5415 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
5416 // If exponent is all ones the number is either a NaN or +/-Infinity.
5417 Register exponent = scratch1;
5418 Register mask_reg = scratch2;
5419 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
5420 li(mask_reg, HeapNumber::kExponentMask);
5421
5422 And(exponent, exponent, mask_reg);
5423 Branch(not_number, eq, exponent, Operand(mask_reg));
5424 }
5425 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5426 bind(&done);
5427}
5428
5429
5430void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5431 FPURegister value,
5432 Register scratch1) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005433 dsra32(scratch1, smi, 0);
5434 mtc1(scratch1, value);
5435 cvt_d_w(value, value);
5436}
5437
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005438static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5439 Label* overflow_label,
5440 Label* no_overflow_label) {
5441 DCHECK(overflow_label || no_overflow_label);
5442 if (!overflow_label) {
5443 DCHECK(no_overflow_label);
5444 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5445 } else {
5446 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5447 if (no_overflow_label) masm->Branch(no_overflow_label);
5448 }
5449}
5450
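// The pre-r6 Add/Sub/Dadd/DsubBranchOvf paths below share one pattern: they
// compute the sum or difference, then build a word whose sign bit is set
// exactly when signed overflow occurred ((dst ^ left) & (dst ^ right) for
// addition, (dst ^ left) & (left ^ right) for subtraction) and hand it to
// BranchOvfHelper, which branches on that sign bit.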
Ben Murdochda12d292016-06-02 14:46:10 +01005451void MacroAssembler::AddBranchOvf(Register dst, Register left,
5452 const Operand& right, Label* overflow_label,
5453 Label* no_overflow_label, Register scratch) {
5454 if (right.is_reg()) {
5455 AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5456 scratch);
5457 } else {
5458 if (kArchVariant == kMips64r6) {
5459 Register right_reg = t9;
5460 DCHECK(!left.is(right_reg));
5461 li(right_reg, Operand(right));
5462 AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5463 } else {
5464 Register overflow_dst = t9;
5465 DCHECK(!dst.is(scratch));
5466 DCHECK(!dst.is(overflow_dst));
5467 DCHECK(!scratch.is(overflow_dst));
5468 DCHECK(!left.is(overflow_dst));
5469 if (dst.is(left)) {
5470 mov(scratch, left); // Preserve left.
5471 // Left is overwritten.
5472 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5473 xor_(scratch, dst, scratch); // Original left.
5474 // Load right since xori takes uint16 as immediate.
5475 Addu(overflow_dst, zero_reg, right);
5476 xor_(overflow_dst, dst, overflow_dst);
5477 and_(overflow_dst, overflow_dst, scratch);
5478 } else {
5479 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5480 xor_(overflow_dst, dst, left);
5481 // Load right since xori takes uint16 as immediate.
5482 Addu(scratch, zero_reg, right);
5483 xor_(scratch, dst, scratch);
5484 and_(overflow_dst, scratch, overflow_dst);
5485 }
5486 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5487 }
5488 }
5489}
5490
5491void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5492 Label* overflow_label,
5493 Label* no_overflow_label, Register scratch) {
5494 if (kArchVariant == kMips64r6) {
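// MIPS64r6 has bovc/bnvc, which branch directly on signed-add overflow /
// no-overflow, so no explicit overflow word needs to be computed here.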
5495 if (!overflow_label) {
5496 DCHECK(no_overflow_label);
5497 DCHECK(!dst.is(scratch));
5498 Register left_reg = left.is(dst) ? scratch : left;
5499 Register right_reg = right.is(dst) ? t9 : right;
5500 DCHECK(!dst.is(left_reg));
5501 DCHECK(!dst.is(right_reg));
5502 Move(left_reg, left);
5503 Move(right_reg, right);
5504 addu(dst, left, right);
5505 bnvc(left_reg, right_reg, no_overflow_label);
5506 } else {
5507 bovc(left, right, overflow_label);
5508 addu(dst, left, right);
5509 if (no_overflow_label) bc(no_overflow_label);
5510 }
5511 } else {
5512 Register overflow_dst = t9;
5513 DCHECK(!dst.is(scratch));
5514 DCHECK(!dst.is(overflow_dst));
5515 DCHECK(!scratch.is(overflow_dst));
5516 DCHECK(!left.is(overflow_dst));
5517 DCHECK(!right.is(overflow_dst));
5518 DCHECK(!left.is(scratch));
5519 DCHECK(!right.is(scratch));
5520
5521 if (left.is(right) && dst.is(left)) {
5522 mov(overflow_dst, right);
5523 right = overflow_dst;
5524 }
5525
5526 if (dst.is(left)) {
5527 mov(scratch, left); // Preserve left.
5528 addu(dst, left, right); // Left is overwritten.
5529 xor_(scratch, dst, scratch); // Original left.
5530 xor_(overflow_dst, dst, right);
5531 and_(overflow_dst, overflow_dst, scratch);
5532 } else if (dst.is(right)) {
5533 mov(scratch, right); // Preserve right.
5534 addu(dst, left, right); // Right is overwritten.
5535 xor_(scratch, dst, scratch); // Original right.
5536 xor_(overflow_dst, dst, left);
5537 and_(overflow_dst, overflow_dst, scratch);
5538 } else {
5539 addu(dst, left, right);
5540 xor_(overflow_dst, dst, left);
5541 xor_(scratch, dst, right);
5542 and_(overflow_dst, scratch, overflow_dst);
5543 }
5544 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5545 }
5546}
5547
5548void MacroAssembler::SubBranchOvf(Register dst, Register left,
5549 const Operand& right, Label* overflow_label,
5550 Label* no_overflow_label, Register scratch) {
5551 DCHECK(overflow_label || no_overflow_label);
5552 if (right.is_reg()) {
5553 SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5554 scratch);
5555 } else {
5556 Register overflow_dst = t9;
5557 DCHECK(!dst.is(scratch));
5558 DCHECK(!dst.is(overflow_dst));
5559 DCHECK(!scratch.is(overflow_dst));
5560 DCHECK(!left.is(overflow_dst));
5561 DCHECK(!left.is(scratch));
5562 if (dst.is(left)) {
5563 mov(scratch, left); // Preserve left.
5564 // Left is overwritten.
5565 Subu(dst, left, static_cast<int32_t>(right.immediate()));
5566 // Load right since xori takes uint16 as immediate.
5567 Addu(overflow_dst, zero_reg, right);
5568 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5569 xor_(scratch, dst, scratch); // scratch is original left.
5570 and_(overflow_dst, scratch, overflow_dst);
5571 } else {
5572 Subu(dst, left, right);
5573 xor_(overflow_dst, dst, left);
5574 // Load right since xori takes uint16 as immediate.
5575 Addu(scratch, zero_reg, right);
5576 xor_(scratch, left, scratch);
5577 and_(overflow_dst, scratch, overflow_dst);
5578 }
5579 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5580 }
5581}
5582
5583void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5584 Label* overflow_label,
5585 Label* no_overflow_label, Register scratch) {
5586 DCHECK(overflow_label || no_overflow_label);
5587 Register overflow_dst = t9;
5588 DCHECK(!dst.is(scratch));
5589 DCHECK(!dst.is(overflow_dst));
5590 DCHECK(!scratch.is(overflow_dst));
5591 DCHECK(!overflow_dst.is(left));
5592 DCHECK(!overflow_dst.is(right));
5593 DCHECK(!scratch.is(left));
5594 DCHECK(!scratch.is(right));
5595
5596 // This happens with some crankshaft code. Since Subu works fine if
5597 // left == right, let's not make that restriction here.
5598 if (left.is(right)) {
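// left - left is always zero and can never overflow, so branch straight to
// the no-overflow label.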
5599 mov(dst, zero_reg);
5600 if (no_overflow_label) {
5601 Branch(no_overflow_label);
5602 }
5603 }
5604
5605 if (dst.is(left)) {
5606 mov(scratch, left); // Preserve left.
5607 subu(dst, left, right); // Left is overwritten.
5608 xor_(overflow_dst, dst, scratch); // scratch is original left.
5609 xor_(scratch, scratch, right); // scratch is original left.
5610 and_(overflow_dst, scratch, overflow_dst);
5611 } else if (dst.is(right)) {
5612 mov(scratch, right); // Preserve right.
5613 subu(dst, left, right); // Right is overwritten.
5614 xor_(overflow_dst, dst, left);
5615 xor_(scratch, left, scratch); // Original right.
5616 and_(overflow_dst, scratch, overflow_dst);
5617 } else {
5618 subu(dst, left, right);
5619 xor_(overflow_dst, dst, left);
5620 xor_(scratch, left, right);
5621 and_(overflow_dst, scratch, overflow_dst);
5622 }
5623 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5624}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005625
5626void MacroAssembler::DaddBranchOvf(Register dst, Register left,
5627 const Operand& right, Label* overflow_label,
5628 Label* no_overflow_label, Register scratch) {
5629 if (right.is_reg()) {
5630 DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5631 scratch);
5632 } else {
5633 Register overflow_dst = t9;
5634 DCHECK(!dst.is(scratch));
5635 DCHECK(!dst.is(overflow_dst));
5636 DCHECK(!scratch.is(overflow_dst));
5637 DCHECK(!left.is(overflow_dst));
5638 li(overflow_dst, right); // Load right.
5639 if (dst.is(left)) {
5640 mov(scratch, left); // Preserve left.
5641 Daddu(dst, left, overflow_dst); // Left is overwritten.
5642 xor_(scratch, dst, scratch); // Original left.
5643 xor_(overflow_dst, dst, overflow_dst);
5644 and_(overflow_dst, overflow_dst, scratch);
5645 } else {
5646 Daddu(dst, left, overflow_dst);
5647 xor_(scratch, dst, overflow_dst);
5648 xor_(overflow_dst, dst, left);
5649 and_(overflow_dst, scratch, overflow_dst);
5650 }
5651 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5652 }
5653}
5654
5655
5656void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
5657 Label* overflow_label,
5658 Label* no_overflow_label, Register scratch) {
5659 Register overflow_dst = t9;
5660 DCHECK(!dst.is(scratch));
5661 DCHECK(!dst.is(overflow_dst));
5662 DCHECK(!scratch.is(overflow_dst));
5663 DCHECK(!left.is(overflow_dst));
5664 DCHECK(!right.is(overflow_dst));
5665 DCHECK(!left.is(scratch));
5666 DCHECK(!right.is(scratch));
5667
5668 if (left.is(right) && dst.is(left)) {
5669 mov(overflow_dst, right);
5670 right = overflow_dst;
5671 }
5672
5673 if (dst.is(left)) {
5674 mov(scratch, left); // Preserve left.
5675 daddu(dst, left, right); // Left is overwritten.
5676 xor_(scratch, dst, scratch); // Original left.
5677 xor_(overflow_dst, dst, right);
5678 and_(overflow_dst, overflow_dst, scratch);
5679 } else if (dst.is(right)) {
5680 mov(scratch, right); // Preserve right.
5681 daddu(dst, left, right); // Right is overwritten.
5682 xor_(scratch, dst, scratch); // Original right.
5683 xor_(overflow_dst, dst, left);
5684 and_(overflow_dst, overflow_dst, scratch);
5685 } else {
5686 daddu(dst, left, right);
5687 xor_(overflow_dst, dst, left);
5688 xor_(scratch, dst, right);
5689 and_(overflow_dst, scratch, overflow_dst);
5690 }
5691 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5692}
5693
5694
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005695void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5696 const Operand& right, Label* overflow_label,
5697 Label* no_overflow_label, Register scratch) {
5698 DCHECK(overflow_label || no_overflow_label);
5699 if (right.is_reg()) {
5700 DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5701 scratch);
5702 } else {
5703 Register overflow_dst = t9;
5704 DCHECK(!dst.is(scratch));
5705 DCHECK(!dst.is(overflow_dst));
5706 DCHECK(!scratch.is(overflow_dst));
5707 DCHECK(!left.is(overflow_dst));
5708 DCHECK(!left.is(scratch));
5709 li(overflow_dst, right); // Load right.
5710 if (dst.is(left)) {
5711 mov(scratch, left); // Preserve left.
5712 Dsubu(dst, left, overflow_dst); // Left is overwritten.
5713 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5714 xor_(scratch, dst, scratch); // scratch is original left.
5715 and_(overflow_dst, scratch, overflow_dst);
5716 } else {
5717 Dsubu(dst, left, overflow_dst);
5718 xor_(scratch, left, overflow_dst);
5719 xor_(overflow_dst, dst, left);
5720 and_(overflow_dst, scratch, overflow_dst);
5721 }
5722 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5723 }
5724}
5725
5726
5727void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5728 Label* overflow_label,
5729 Label* no_overflow_label, Register scratch) {
5730 DCHECK(overflow_label || no_overflow_label);
5731 Register overflow_dst = t9;
5732 DCHECK(!dst.is(scratch));
5733 DCHECK(!dst.is(overflow_dst));
5734 DCHECK(!scratch.is(overflow_dst));
5735 DCHECK(!overflow_dst.is(left));
5736 DCHECK(!overflow_dst.is(right));
5737 DCHECK(!scratch.is(left));
5738 DCHECK(!scratch.is(right));
5739
5740 // This happens with some crankshaft code. Since Dsubu works fine if
5741 // left == right, let's not make that restriction here.
5742 if (left.is(right)) {
5743 mov(dst, zero_reg);
5744 if (no_overflow_label) {
5745 Branch(no_overflow_label);
5746 }
5747 }
5748
5749 if (dst.is(left)) {
5750 mov(scratch, left); // Preserve left.
5751 dsubu(dst, left, right); // Left is overwritten.
5752 xor_(overflow_dst, dst, scratch); // scratch is original left.
5753 xor_(scratch, scratch, right); // scratch is original left.
5754 and_(overflow_dst, scratch, overflow_dst);
5755 } else if (dst.is(right)) {
5756 mov(scratch, right); // Preserve right.
5757 dsubu(dst, left, right); // Right is overwritten.
5758 xor_(overflow_dst, dst, left);
5759 xor_(scratch, left, scratch); // Original right.
5760 and_(overflow_dst, scratch, overflow_dst);
5761 } else {
5762 dsubu(dst, left, right);
5763 xor_(overflow_dst, dst, left);
5764 xor_(scratch, left, right);
5765 and_(overflow_dst, scratch, overflow_dst);
5766 }
5767 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5768}
5769
5770
5771void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5772 SaveFPRegsMode save_doubles,
5773 BranchDelaySlot bd) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005774 // All parameters are on the stack. v0 has the return value after call.
5775
5776 // If the expected number of arguments of the runtime function is
5777 // constant, we check that the actual number of arguments match the
5778 // expectation.
5779 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5780
5781 // TODO(1236192): Most runtime routines don't need the number of
5782 // arguments passed in because it is constant. At some point we
5783 // should remove this need and make the runtime routine entry code
5784 // smarter.
5785 PrepareCEntryArgs(num_arguments);
5786 PrepareCEntryFunction(ExternalReference(f, isolate()));
5787 CEntryStub stub(isolate(), 1, save_doubles);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005788 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005789}
5790
5791
5792void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5793 int num_arguments,
5794 BranchDelaySlot bd) {
5795 PrepareCEntryArgs(num_arguments);
5796 PrepareCEntryFunction(ext);
5797
5798 CEntryStub stub(isolate(), 1);
5799 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5800}
5801
5802
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005803void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5804 const Runtime::Function* function = Runtime::FunctionForId(fid);
5805 DCHECK_EQ(1, function->result_size);
5806 if (function->nargs >= 0) {
5807 PrepareCEntryArgs(function->nargs);
5808 }
5809 JumpToExternalReference(ExternalReference(fid, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005810}
5811
5812
5813void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5814 BranchDelaySlot bd) {
5815 PrepareCEntryFunction(builtin);
5816 CEntryStub stub(isolate(), 1);
5817 Jump(stub.GetCode(),
5818 RelocInfo::CODE_TARGET,
5819 al,
5820 zero_reg,
5821 Operand(zero_reg),
5822 bd);
5823}
5824
5825
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005826void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5827 Register scratch1, Register scratch2) {
5828 if (FLAG_native_code_counters && counter->Enabled()) {
5829 li(scratch1, Operand(value));
5830 li(scratch2, Operand(ExternalReference(counter)));
5831 sd(scratch1, MemOperand(scratch2));
5832 }
5833}
5834
5835
5836void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5837 Register scratch1, Register scratch2) {
5838 DCHECK(value > 0);
5839 if (FLAG_native_code_counters && counter->Enabled()) {
5840 li(scratch2, Operand(ExternalReference(counter)));
5841 ld(scratch1, MemOperand(scratch2));
5842 Daddu(scratch1, scratch1, Operand(value));
5843 sd(scratch1, MemOperand(scratch2));
5844 }
5845}
5846
5847
5848void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5849 Register scratch1, Register scratch2) {
5850 DCHECK(value > 0);
5851 if (FLAG_native_code_counters && counter->Enabled()) {
5852 li(scratch2, Operand(ExternalReference(counter)));
5853 ld(scratch1, MemOperand(scratch2));
5854 Dsubu(scratch1, scratch1, Operand(value));
5855 sd(scratch1, MemOperand(scratch2));
5856 }
5857}
5858
5859
5860// -----------------------------------------------------------------------------
5861// Debugging.
5862
5863void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5864 Register rs, Operand rt) {
5865 if (emit_debug_code())
5866 Check(cc, reason, rs, rt);
5867}
5868
5869
5870void MacroAssembler::AssertFastElements(Register elements) {
5871 if (emit_debug_code()) {
5872 DCHECK(!elements.is(at));
5873 Label ok;
5874 push(elements);
5875 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5876 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5877 Branch(&ok, eq, elements, Operand(at));
5878 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5879 Branch(&ok, eq, elements, Operand(at));
5880 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5881 Branch(&ok, eq, elements, Operand(at));
5882 Abort(kJSObjectWithFastElementsMapHasSlowElements);
5883 bind(&ok);
5884 pop(elements);
5885 }
5886}
5887
5888
5889void MacroAssembler::Check(Condition cc, BailoutReason reason,
5890 Register rs, Operand rt) {
5891 Label L;
5892 Branch(&L, cc, rs, rt);
5893 Abort(reason);
5894 // Will not return here.
5895 bind(&L);
5896}
5897
5898
5899void MacroAssembler::Abort(BailoutReason reason) {
5900 Label abort_start;
5901 bind(&abort_start);
5902#ifdef DEBUG
5903 const char* msg = GetBailoutReason(reason);
5904 if (msg != NULL) {
5905 RecordComment("Abort message: ");
5906 RecordComment(msg);
5907 }
5908
5909 if (FLAG_trap_on_abort) {
5910 stop(msg);
5911 return;
5912 }
5913#endif
5914
5915 li(a0, Operand(Smi::FromInt(reason)));
5916 push(a0);
5917 // Disable stub call restrictions to always allow calls to abort.
5918 if (!has_frame_) {
5919 // We don't actually want to generate a pile of code for this, so just
5920 // claim there is a stack frame, without generating one.
5921 FrameScope scope(this, StackFrame::NONE);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005922 CallRuntime(Runtime::kAbort);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005923 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005924 CallRuntime(Runtime::kAbort);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005925 }
5926 // Will not return here.
5927 if (is_trampoline_pool_blocked()) {
5928 // If the calling code cares about the exact number of
5929 // instructions generated, we insert padding here to keep the size
5930 // of the Abort macro constant.
5931 // Currently in debug mode with debug_code enabled the number of
5932 // generated instructions is 10, so we use this as a maximum value.
5933 static const int kExpectedAbortInstructions = 10;
5934 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5935 DCHECK(abort_instructions <= kExpectedAbortInstructions);
5936 while (abort_instructions++ < kExpectedAbortInstructions) {
5937 nop();
5938 }
5939 }
5940}
5941
5942
5943void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5944 if (context_chain_length > 0) {
5945 // Move up the chain of contexts to the context containing the slot.
5946 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5947 for (int i = 1; i < context_chain_length; i++) {
5948 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5949 }
5950 } else {
5951 // Slot is in the current function context. Move it into the
5952 // destination register in case we store into it (the write barrier
5953 // cannot be allowed to destroy the context in cp).
5954 Move(dst, cp);
5955 }
5956}
5957
5958
5959void MacroAssembler::LoadTransitionedArrayMapConditional(
5960 ElementsKind expected_kind,
5961 ElementsKind transitioned_kind,
5962 Register map_in_out,
5963 Register scratch,
5964 Label* no_map_match) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005965 DCHECK(IsFastElementsKind(expected_kind));
5966 DCHECK(IsFastElementsKind(transitioned_kind));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005967
5968 // Check that the function's map is the same as the expected cached map.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005969 ld(scratch, NativeContextMemOperand());
5970 ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005971 Branch(no_map_match, ne, map_in_out, Operand(at));
5972
5973 // Use the transitioned cached map.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005974 ld(map_in_out,
5975 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005976}
5977
5978
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005979void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5980 ld(dst, NativeContextMemOperand());
5981 ld(dst, ContextMemOperand(dst, index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005982}
5983
5984
5985void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5986 Register map,
5987 Register scratch) {
5988 // Load the initial map. The global functions all have initial maps.
5989 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5990 if (emit_debug_code()) {
5991 Label ok, fail;
5992 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5993 Branch(&ok);
5994 bind(&fail);
5995 Abort(kGlobalFunctionsMustHaveInitialMap);
5996 bind(&ok);
5997 }
5998}
5999
Ben Murdochda12d292016-06-02 14:46:10 +01006000void MacroAssembler::StubPrologue(StackFrame::Type type) {
6001 li(at, Operand(Smi::FromInt(type)));
6002 PushCommonFrame(at);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006003}
6004
6005
6006void MacroAssembler::Prologue(bool code_pre_aging) {
6007 PredictableCodeSizeScope predictible_code_size_scope(
6008 this, kNoCodeAgeSequenceLength);
6009 // The following three instructions must remain together and unmodified
6010 // for code aging to work properly.
6011 if (code_pre_aging) {
6012 // Pre-age the code.
6013 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
6014 nop(Assembler::CODE_AGE_MARKER_NOP);
6015 // Load the stub address to t9 and call it,
6016 // GetCodeAgeAndParity() extracts the stub address from this instruction.
6017 li(t9,
6018 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
6019 ADDRESS_LOAD);
6020 nop(); // Prevent jalr to jal optimization.
6021 jalr(t9, a0);
6022 nop(); // Branch delay slot nop.
6023 nop(); // Pad the empty space.
6024 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01006025 PushStandardFrame(a1);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006026 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6027 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6028 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006029 }
6030}
6031
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006032void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
6033 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
6034 ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
6035 ld(vector,
6036 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
6037}
6038
6039
Emily Bernierd0a1eb72015-03-24 16:35:39 -04006040void MacroAssembler::EnterFrame(StackFrame::Type type,
6041 bool load_constant_pool_pointer_reg) {
6042 // Out-of-line constant pool not implemented on mips64.
6043 UNREACHABLE();
6044}
6045
6046
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006047void MacroAssembler::EnterFrame(StackFrame::Type type) {
Ben Murdochda12d292016-06-02 14:46:10 +01006048 int stack_offset, fp_offset;
6049 if (type == StackFrame::INTERNAL) {
6050 stack_offset = -4 * kPointerSize;
6051 fp_offset = 2 * kPointerSize;
6052 } else {
6053 stack_offset = -3 * kPointerSize;
6054 fp_offset = 1 * kPointerSize;
6055 }
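// The frame built here holds, from higher to lower addresses: ra, fp, a
// StackFrame type marker and, for INTERNAL frames only, the code object.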
6056 daddiu(sp, sp, stack_offset);
6057 stack_offset = -stack_offset - kPointerSize;
6058 sd(ra, MemOperand(sp, stack_offset));
6059 stack_offset -= kPointerSize;
6060 sd(fp, MemOperand(sp, stack_offset));
6061 stack_offset -= kPointerSize;
6062 li(t9, Operand(Smi::FromInt(type)));
6063 sd(t9, MemOperand(sp, stack_offset));
6064 if (type == StackFrame::INTERNAL) {
6065 DCHECK_EQ(stack_offset, kPointerSize);
6066 li(t9, Operand(CodeObject()));
6067 sd(t9, MemOperand(sp, 0));
6068 } else {
6069 DCHECK_EQ(stack_offset, 0);
6070 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006071 // Adjust FP to point to saved FP.
Ben Murdochda12d292016-06-02 14:46:10 +01006072 Daddu(fp, sp, Operand(fp_offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006073}
6074
6075
6076void MacroAssembler::LeaveFrame(StackFrame::Type type) {
Ben Murdochda12d292016-06-02 14:46:10 +01006077 daddiu(sp, fp, 2 * kPointerSize);
6078 ld(ra, MemOperand(fp, 1 * kPointerSize));
6079 ld(fp, MemOperand(fp, 0 * kPointerSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006080}
6081
Ben Murdochda12d292016-06-02 14:46:10 +01006082void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006083 // Set up the frame structure on the stack.
6084 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
6085 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
6086 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
6087
6088 // This is how the stack will look:
6089 // fp + 2 (==kCallerSPDisplacement) - old stack's end
6090 // [fp + 1 (==kCallerPCOffset)] - saved old ra
6091 // [fp + 0 (==kCallerFPOffset)] - saved old fp
Ben Murdochda12d292016-06-02 14:46:10 +01006092 // [fp - 1] - StackFrame::EXIT Smi
6093 // [fp - 2 (==kSPOffset)] - sp of the called function
6094 // [fp - 3 (==kCodeOffset)] - CodeObject
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006095 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
6096 // new stack (will contain saved ra)
6097
Ben Murdochda12d292016-06-02 14:46:10 +01006098 // Save registers and reserve room for saved entry sp and code object.
6099 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
6100 sd(ra, MemOperand(sp, 4 * kPointerSize));
6101 sd(fp, MemOperand(sp, 3 * kPointerSize));
6102 li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
6103 sd(at, MemOperand(sp, 2 * kPointerSize));
6104 // Set up new frame pointer.
6105 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006106
6107 if (emit_debug_code()) {
6108 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
6109 }
6110
6111 // Accessed from ExitFrame::code_slot.
6112 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
6113 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
6114
6115 // Save the frame pointer and the context in top.
6116 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6117 sd(fp, MemOperand(t8));
6118 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6119 sd(cp, MemOperand(t8));
6120
6121 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
6122 if (save_doubles) {
6123 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
6124 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006125 int space = kNumOfSavedRegisters * kDoubleSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006126 Dsubu(sp, sp, Operand(space));
6127 // Remember: we only need to save every 2nd double FPU value.
6128 for (int i = 0; i < kNumOfSavedRegisters; i++) {
6129 FPURegister reg = FPURegister::from_code(2 * i);
6130 sdc1(reg, MemOperand(sp, i * kDoubleSize));
6131 }
6132 }
6133
6134 // Reserve space for the return address, stack space and an optional slot
6135 // (used by the DirectCEntryStub to hold the return value if a struct is
6136 // returned) and align the frame preparing for calling the runtime function.
6137 DCHECK(stack_space >= 0);
6138 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
6139 if (frame_alignment > 0) {
6140 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6141 And(sp, sp, Operand(-frame_alignment)); // Align stack.
6142 }
6143
6144 // Set the exit frame sp value to point just before the return address
6145 // location.
6146 daddiu(at, sp, kPointerSize);
6147 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
6148}
6149
6150
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006151void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
6152 bool restore_context, bool do_return,
6153 bool argument_count_is_length) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006154 // Optionally restore all double registers.
6155 if (save_doubles) {
6156 // Remember: we only need to restore every 2nd double FPU value.
6157 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
Ben Murdochda12d292016-06-02 14:46:10 +01006158 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
6159 kNumOfSavedRegisters * kDoubleSize));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006160 for (int i = 0; i < kNumOfSavedRegisters; i++) {
6161 FPURegister reg = FPURegister::from_code(2 * i);
6162 ldc1(reg, MemOperand(t8, i * kDoubleSize));
6163 }
6164 }
6165
6166 // Clear top frame.
6167 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6168 sd(zero_reg, MemOperand(t8));
6169
6170 // Restore current context from top and clear it in debug mode.
6171 if (restore_context) {
6172 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6173 ld(cp, MemOperand(t8));
6174 }
6175#ifdef DEBUG
6176 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6177 sd(a3, MemOperand(t8));
6178#endif
6179
6180 // Pop the arguments, restore registers, and return.
6181 mov(sp, fp); // Respect ABI stack constraint.
6182 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
6183 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
6184
6185 if (argument_count.is_valid()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006186 if (argument_count_is_length) {
6187 daddu(sp, sp, argument_count);
6188 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01006189 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006190 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006191 }
6192
6193 if (do_return) {
6194 Ret(USE_DELAY_SLOT);
6195 // If returning, the instruction in the delay slot will be the daddiu below.
6196 }
6197 daddiu(sp, sp, 2 * kPointerSize);
6198}
6199
6200
6201void MacroAssembler::InitializeNewString(Register string,
6202 Register length,
6203 Heap::RootListIndex map_index,
6204 Register scratch1,
6205 Register scratch2) {
6206 // dsll(scratch1, length, kSmiTagSize);
6207 dsll32(scratch1, length, 0);
6208 LoadRoot(scratch2, map_index);
6209 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
6210 li(scratch1, Operand(String::kEmptyHashField));
6211 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006212 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006213}
6214
6215
6216int MacroAssembler::ActivationFrameAlignment() {
6217#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6218 // Running on the real platform. Use the alignment as mandated by the local
6219 // environment.
6220 // Note: This will break if we ever start generating snapshots on one Mips
6221 // platform for another Mips platform with a different alignment.
6222 return base::OS::ActivationFrameAlignment();
6223#else // V8_HOST_ARCH_MIPS
6224 // If we are using the simulator then we should always align to the expected
6225 // alignment. As the simulator is used to generate snapshots we do not know
6226 // if the target platform will need alignment, so this is controlled from a
6227 // flag.
6228 return FLAG_sim_stack_alignment;
6229#endif // V8_HOST_ARCH_MIPS
6230}
6231
6232
6233void MacroAssembler::AssertStackIsAligned() {
6234 if (emit_debug_code()) {
6235 const int frame_alignment = ActivationFrameAlignment();
6236 const int frame_alignment_mask = frame_alignment - 1;
6237
6238 if (frame_alignment > kPointerSize) {
6239 Label alignment_as_expected;
6240 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6241 andi(at, sp, frame_alignment_mask);
6242 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6243 // Don't use Check here, as it will call Runtime_Abort and re-enter here.
6244 stop("Unexpected stack alignment");
6245 bind(&alignment_as_expected);
6246 }
6247 }
6248}
6249
6250
6251void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
6252 Register reg,
6253 Register scratch,
6254 Label* not_power_of_two_or_zero) {
6255 Dsubu(scratch, reg, Operand(1));
6256 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
6257 scratch, Operand(zero_reg));
6258 and_(at, scratch, reg); // In the delay slot.
6259 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
6260}
6261
6262
6263void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
6264 DCHECK(!reg.is(overflow));
6265 mov(overflow, reg); // Save original value.
6266 SmiTag(reg);
6267 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
6268}
6269
6270
6271void MacroAssembler::SmiTagCheckOverflow(Register dst,
6272 Register src,
6273 Register overflow) {
6274 if (dst.is(src)) {
6275 // Fall back to slower case.
6276 SmiTagCheckOverflow(dst, overflow);
6277 } else {
6278 DCHECK(!dst.is(src));
6279 DCHECK(!dst.is(overflow));
6280 DCHECK(!src.is(overflow));
6281 SmiTag(dst, src);
6282 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
6283 }
6284}
6285
6286
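// With 32-bit Smi values (the default on MIPS64), the payload sits in the
// upper 32 bits of the tagged 64-bit word, so UntagSmiMemOperand lets us load
// just that 32-bit half with lw instead of loading the whole word and shifting.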
6287void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
6288 if (SmiValuesAre32Bits()) {
6289 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6290 } else {
6291 lw(dst, src);
6292 SmiUntag(dst);
6293 }
6294}
6295
6296
6297void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
6298 if (SmiValuesAre32Bits()) {
6299 // TODO(plind): not clear if lw or ld is faster here; needs a micro-benchmark.
6300 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6301 dsll(dst, dst, scale);
6302 } else {
6303 lw(dst, src);
6304 DCHECK(scale >= kSmiTagSize);
6305 sll(dst, dst, scale - kSmiTagSize);
6306 }
6307}
6308
6309
6310// Returns 2 values: the Smi and a scaled version of the int within the Smi.
6311void MacroAssembler::SmiLoadWithScale(Register d_smi,
6312 Register d_scaled,
6313 MemOperand src,
6314 int scale) {
6315 if (SmiValuesAre32Bits()) {
6316 ld(d_smi, src);
6317 dsra(d_scaled, d_smi, kSmiShift - scale);
6318 } else {
6319 lw(d_smi, src);
6320 DCHECK(scale >= kSmiTagSize);
6321 sll(d_scaled, d_smi, scale - kSmiTagSize);
6322 }
6323}
6324
6325
6326// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
6327void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
6328 Register d_scaled,
6329 MemOperand src,
6330 int scale) {
6331 if (SmiValuesAre32Bits()) {
6332 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
6333 dsll(d_scaled, d_int, scale);
6334 } else {
6335 lw(d_int, src);
6336 // Need both the int and the scaled int, so use two instructions.
6337 SmiUntag(d_int);
6338 sll(d_scaled, d_int, scale);
6339 }
6340}
6341
6342
6343void MacroAssembler::UntagAndJumpIfSmi(Register dst,
6344 Register src,
6345 Label* smi_case) {
6346 // DCHECK(!dst.is(src));
6347 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
6348 SmiUntag(dst, src);
6349}
6350
6351
6352void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
6353 Register src,
6354 Label* non_smi_case) {
6355 // DCHECK(!dst.is(src));
6356 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
6357 SmiUntag(dst, src);
6358}
6359
6360void MacroAssembler::JumpIfSmi(Register value,
6361 Label* smi_label,
6362 Register scratch,
6363 BranchDelaySlot bd) {
6364 DCHECK_EQ(0, kSmiTag);
6365 andi(scratch, value, kSmiTagMask);
6366 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
6367}
6368
6369void MacroAssembler::JumpIfNotSmi(Register value,
6370 Label* not_smi_label,
6371 Register scratch,
6372 BranchDelaySlot bd) {
6373 DCHECK_EQ(0, kSmiTag);
6374 andi(scratch, value, kSmiTagMask);
6375 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
6376}
6377
6378
6379void MacroAssembler::JumpIfNotBothSmi(Register reg1,
6380 Register reg2,
6381 Label* on_not_both_smi) {
6382 STATIC_ASSERT(kSmiTag == 0);
6383 // TODO(plind): Find some better way to fix this assert issue.
6384#if defined(__APPLE__)
6385 DCHECK_EQ(1, kSmiTagMask);
6386#else
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006387 DCHECK_EQ((int64_t)1, kSmiTagMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006388#endif
6389 or_(at, reg1, reg2);
6390 JumpIfNotSmi(at, on_not_both_smi);
6391}
6392
6393
6394void MacroAssembler::JumpIfEitherSmi(Register reg1,
6395 Register reg2,
6396 Label* on_either_smi) {
6397 STATIC_ASSERT(kSmiTag == 0);
6398 // TODO(plind): Find some better way to fix this assert issue.
6399#if defined(__APPLE__)
6400 DCHECK_EQ(1, kSmiTagMask);
6401#else
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006402 DCHECK_EQ((int64_t)1, kSmiTagMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006403#endif
6404 // Both Smi tags must be 1 (not Smi).
6405 and_(at, reg1, reg2);
6406 JumpIfSmi(at, on_either_smi);
6407}
6408
Ben Murdochda12d292016-06-02 14:46:10 +01006409void MacroAssembler::AssertNotNumber(Register object) {
6410 if (emit_debug_code()) {
6411 STATIC_ASSERT(kSmiTag == 0);
6412 andi(at, object, kSmiTagMask);
6413 Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6414 GetObjectType(object, t8, t8);
6415 Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6416 }
6417}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006418
6419void MacroAssembler::AssertNotSmi(Register object) {
6420 if (emit_debug_code()) {
6421 STATIC_ASSERT(kSmiTag == 0);
6422 andi(at, object, kSmiTagMask);
6423 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6424 }
6425}
6426
6427
6428void MacroAssembler::AssertSmi(Register object) {
6429 if (emit_debug_code()) {
6430 STATIC_ASSERT(kSmiTag == 0);
6431 andi(at, object, kSmiTagMask);
6432 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6433 }
6434}
6435
6436
6437void MacroAssembler::AssertString(Register object) {
6438 if (emit_debug_code()) {
6439 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006440 SmiTst(object, t8);
6441 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6442 GetObjectType(object, t8, t8);
6443 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006444 }
6445}
6446
6447
6448void MacroAssembler::AssertName(Register object) {
6449 if (emit_debug_code()) {
6450 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006451 SmiTst(object, t8);
6452 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6453 GetObjectType(object, t8, t8);
6454 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6455 }
6456}
6457
6458
6459void MacroAssembler::AssertFunction(Register object) {
6460 if (emit_debug_code()) {
6461 STATIC_ASSERT(kSmiTag == 0);
6462 SmiTst(object, t8);
6463 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6464 GetObjectType(object, t8, t8);
6465 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6466 }
6467}
6468
6469
6470void MacroAssembler::AssertBoundFunction(Register object) {
6471 if (emit_debug_code()) {
6472 STATIC_ASSERT(kSmiTag == 0);
6473 SmiTst(object, t8);
6474 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6475 GetObjectType(object, t8, t8);
6476 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006477 }
6478}
6479
Ben Murdochc5610432016-08-08 18:44:38 +01006480void MacroAssembler::AssertGeneratorObject(Register object) {
6481 if (emit_debug_code()) {
6482 STATIC_ASSERT(kSmiTag == 0);
6483 SmiTst(object, t8);
6484 Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
6485 GetObjectType(object, t8, t8);
6486 Check(eq, kOperandIsNotAGeneratorObject, t8,
6487 Operand(JS_GENERATOR_OBJECT_TYPE));
6488 }
6489}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006490
Ben Murdoch097c5b22016-05-18 11:27:45 +01006491void MacroAssembler::AssertReceiver(Register object) {
6492 if (emit_debug_code()) {
6493 STATIC_ASSERT(kSmiTag == 0);
6494 SmiTst(object, t8);
6495 Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6496 GetObjectType(object, t8, t8);
6497 Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6498 }
6499}
6500
6501
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006502void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6503 Register scratch) {
6504 if (emit_debug_code()) {
6505 Label done_checking;
6506 AssertNotSmi(object);
6507 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6508 Branch(&done_checking, eq, object, Operand(scratch));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006509 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006510 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006511 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006512 bind(&done_checking);
6513 }
6514}
6515
6516
6517void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6518 if (emit_debug_code()) {
6519 DCHECK(!reg.is(at));
6520 LoadRoot(at, index);
6521 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6522 }
6523}
6524
6525
6526void MacroAssembler::JumpIfNotHeapNumber(Register object,
6527 Register heap_number_map,
6528 Register scratch,
6529 Label* on_not_heap_number) {
6530 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6531 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6532 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6533}
6534
6535
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006536void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6537 Register first, Register second, Register scratch1, Register scratch2,
6538 Label* failure) {
6539 // Test that both first and second are sequential one-byte strings.
6540 // Assume that they are non-smis.
6541 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6542 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6543 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6544 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6545
6546 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6547 scratch2, failure);
6548}
6549
6550
6551void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6552 Register second,
6553 Register scratch1,
6554 Register scratch2,
6555 Label* failure) {
6556 // Check that neither is a smi.
6557 STATIC_ASSERT(kSmiTag == 0);
6558 And(scratch1, first, Operand(second));
6559 JumpIfSmi(scratch1, failure);
6560 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6561 scratch2, failure);
6562}
6563
6564
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatOneByteStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
  andi(scratch2, second, kFlatOneByteStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, Operand(kFlatOneByteStringMask));
  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}

static const int kRegisterPassedArguments = 8;

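// Illustrative note (added for clarity, not part of the original source):
// on N64, CalculateStackPassedWords(7, 2) counts each double as two words,
// giving 7 + 2 * 2 = 11 register-sized arguments; 11 - 8 = 3 of them spill to
// the stack, and with kCArgSlotCount expected to be 0 on N64 (no argument
// slots, as the comments below note) the result is 3. Counting doubles as two
// words mirrors the O32 convention, where a double occupies two argument
// words.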
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // O32: Up to four simple arguments are passed in registers a0..a3.
  // N64: Up to eight simple arguments are passed in registers a0..a7.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               Register scratch,
                                               uint32_t encoding_mask) {
  Label is_object;
  SmiTst(string, at);
  Check(ne, kNonObject, at, Operand(zero_reg));

  ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
  li(scratch, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType, at, Operand(scratch));

  // TODO(plind): requires Smi size check code for mips32.

  ld(at, FieldMemOperand(string, String::kLengthOffset));
  Check(lt, kIndexIsTooLarge, index, Operand(at));

  DCHECK(Smi::FromInt(0) == 0);
  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
}

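// Sketch of the expected calling protocol (added for illustration; the helper
// names are the ones defined in this file, but the runtime function and the
// argument values are placeholders):
//   PrepareCallCFunction(2, 0, t0);            // Reserve and align the stack.
//   li(a0, Operand(first_placeholder_value));  // Set up integer arguments.
//   li(a1, Operand(second_placeholder_value));
//   CallCFunction(ExternalReference::some_c_function(isolate()), 2);
// PrepareCallCFunction and CallCFunction must be paired, since CallCFunction
// undoes the stack adjustment made here.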
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // N64: Up to eight simple arguments are passed in registers a0..a7.
  //      No argument slots are reserved.
  // O32: Up to four simple arguments are passed in registers a0..a3.
  //      Those four arguments must have reserved argument slots on the stack
  //      for mips, even though those argument slots are not normally used.
  // Both ABIs: Remaining arguments are pushed on the stack, above (higher
  // address than) the (O32) argument slots. (arg slot calculation handled by
  // CalculateStackPassedWords()).
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for the stack-passed
    // arguments and the original value of sp.
    mov(scratch, sp);
    Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, per the MIPS
  // ABI.

#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


#undef BRANCH_ARGS_CHECK

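// Illustrative use of CheckPageFlag (added for clarity; the flag constant is
// just an example of the MemoryChunk flags this helper is typically queried
// with):
//   CheckPageFlag(object, scratch,
//                 MemoryChunk::kPointersFromHereAreInterestingMask, ne,
//                 &interesting);
// branches to 'interesting' when the flag is set on the object's page.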
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}

void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color;
  // Note that we are using two 4-byte aligned loads.
  LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));

  bind(&other_color);
}

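// Worked example for GetMarkBits (added for illustration, using the field
// layout documented in the function below): for an object at offset 0x12348
// from its page start, the 'shift' field is bits 7..3, i.e. (0x48 >> 3) & 31
// == 9, so mask_reg becomes 1 << 9; the 'high' field (bits 19..8) is 0x123,
// so bitmap_reg ends up pointing at cell 0x123 of the page's mark bitmap.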
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  // addr_reg is divided into fields:
  // |63 page base 20|19 high 8|7 shift 3|2 0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
  li(t8, Operand(1));
  dsllv(mask_reg, t8, mask_reg);
}

void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Register load_scratch,
                                 Label* value_is_white) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  // Note that we are using a 4-byte aligned 8-byte load.
  if (emit_debug_code()) {
    LoadWordPair(load_scratch,
                 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  } else {
    lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  }
  And(t8, mask_scratch, load_scratch);
  Branch(value_is_white, eq, t8, Operand(zero_reg));
}

void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  ld(dst,
     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  ld(dst, FieldMemOperand(dst, offset));
}

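// Summary (added for clarity): starting from the receiver in a0, walk the
// prototype chain and jump to call_runtime if any map has an uninitialized
// enum cache, any non-receiver object has a non-empty enum cache, or any
// object has elements other than the empty fixed array or the empty slow
// element dictionary. Clobbers a1, a2, a3, a5, a6 and at.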
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Register null_value = a5;
  Register empty_fixed_array_value = a6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  LoadRoot(null_value, Heap::kNullValueRootIndex);
  jmp(&start);

  bind(&next);
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}

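// Behaviour sketch (added for illustration): ClampUint8 computes
// result = min(max(input, 0), 255), e.g. -5 clamps to 0, 300 clamps to 255,
// and 42 is passed through unchanged.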
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero or NaN: return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is greater than 255: return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}

void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                     Register scratch_reg,
                                                     Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top_adr =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
  li(at, Operand(new_space_allocation_top_adr));
  ld(at, MemOperand(at));
  Xor(scratch_reg, scratch_reg, Operand(at));
  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
  Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
  Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
  Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
  li(at, Operand(new_space_allocation_top_adr));
  ld(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  // Memento map check.
  bind(&map_check);
  ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
  Branch(no_memento_found, ne, scratch_reg,
         Operand(isolate()->factory()->allocation_memento_map()));
}

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Scratch contained elements pointer.
  Move(current, object);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&end, eq, current, Operand(factory->null_value()));

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));

  bind(&end);
}

bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}

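// Illustrative use of CodePatcher (added for clarity; 'pc' stands for the
// address of the instruction to patch):
//   CodePatcher patcher(isolate, pc, 1, CodePatcher::FLUSH);
//   patcher.masm()->nop();
// rewrites one instruction at 'pc' and flushes the instruction cache when the
// patcher goes out of scope.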
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }
  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  // masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Instr current_instr,
                                        uint32_t new_opcode) {
  current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
  masm_.emit(current_instr);
}

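// Worked example for TruncatingDiv (added for illustration; the exact magic
// constants come from base::SignedDivisionByConstant and are assumed here):
// for divisor 7 the multiplier is 0x92492493 with shift 2. For dividend 100,
// the high 32 bits of the signed product are -43; since the multiplier is
// negative and the divisor positive, the dividend is added back
// (-43 + 100 = 57), the arithmetic shift by 2 gives 14, and the sign-bit
// correction adds 0, so the result is 100 / 7 = 14 (truncated).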
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(static_cast<int32_t>(mag.multiplier)));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64