blob: 7f44ab9cc59f392ea4185d9d743e58aeb5a0b97a [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
7
8#include "src/assembler.h"
9#include "src/globals.h"
10#include "src/mips64/assembler-mips64.h"
11
12namespace v8 {
13namespace internal {
14
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000015// Give alias names to registers for calling conventions.
16const Register kReturnRegister0 = {Register::kCode_v0};
17const Register kReturnRegister1 = {Register::kCode_v1};
Ben Murdoch097c5b22016-05-18 11:27:45 +010018const Register kReturnRegister2 = {Register::kCode_a0};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000019const Register kJSFunctionRegister = {Register::kCode_a1};
20const Register kContextRegister = {Register::kCpRegister};
21const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
22const Register kInterpreterRegisterFileRegister = {Register::kCode_a7};
23const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
24const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
25const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
26const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
27const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
28const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
29const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
30
Ben Murdochb8a8cc12014-11-26 15:28:44 +000031// Forward declaration.
32class JumpTarget;
33
34// Reserved Register Usage Summary.
35//
36// Registers t8, t9, and at are reserved for use by the MacroAssembler.
37//
38// The programmer should know that the MacroAssembler may clobber these three,
39// but won't touch other registers except in special cases.
40//
41// Per the MIPS ABI, register t9 must be used for indirect function call
42// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
43// trying to update gp register for position-independent-code. Whenever
44// MIPS generated code calls C code, it must be via t9 register.
45
46
// Flags used for the LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,     // Emit the return sequence after tearing down the frame.
  NO_EMIT_RETURN = false  // Caller emits its own return sequence.
};
52
// Flags used for AllocateHeapNumber.
enum TaggingMode {
  // Tag the result as a heap object.
  TAG_RESULT,
  // Leave the result untagged.
  DONT_TAG_RESULT
};
60
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities, branch to the non number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};
70
// Allow the programmer to control use of the branch delay slot of
// branches, jumps, and calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,  // The macro may fill the delay slot with a useful insn.
  PROTECT          // The macro emits a nop in the delay slot.
};
76
// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than lui/ori/dsll
  // sequence.
  OPTIMIZE_SIZE = 0,
  // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
  // could be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1,
  // For address loads only 4 instructions are required. Used to mark
  // a constant load that will be used as an address without relocation
  // information. It ensures predictable code size, so specific sites
  // in code are patchable.
  ADDRESS_LOAD = 2
};
92
93
// Whether RecordWrite-style helpers should update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a write-barrier helper must first check the value for Smi-ness.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Hint for the write barrier about whether the stored-to location may
// contain pointers the incremental marker cares about.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Whether the return-address register (ra) has been saved by the caller.
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
101
102Register GetRegisterThatIsNotOneOf(Register reg1,
103 Register reg2 = no_reg,
104 Register reg3 = no_reg,
105 Register reg4 = no_reg,
106 Register reg5 = no_reg,
107 Register reg6 = no_reg);
108
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000109bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
110 Register reg4 = no_reg, Register reg5 = no_reg,
111 Register reg6 = no_reg, Register reg7 = no_reg,
112 Register reg8 = no_reg, Register reg9 = no_reg,
113 Register reg10 = no_reg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000114
115
116// -----------------------------------------------------------------------------
117// Static helper functions.
118
// Byte offset of the 32-bit payload word of a Smi stored at |offset|.
// On 64-bit targets the Smi value lives in the upper half of the tagged
// word, so on little-endian targets the payload is half a pointer further.
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
124
125
126inline MemOperand ContextMemOperand(Register context, int index) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000127 return MemOperand(context, Context::SlotOffset(index));
128}
129
130
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000131inline MemOperand NativeContextMemOperand() {
132 return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000133}
134
135
136// Generate a MemOperand for loading a field from an object.
137inline MemOperand FieldMemOperand(Register object, int offset) {
138 return MemOperand(object, offset - kHeapObjectTag);
139}
140
141
142inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000143 // Assumes that Smis are shifted by 32 bits.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000144 STATIC_ASSERT(kSmiShift == 32);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000145 return MemOperand(rm, SmiWordOffset(offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000146}
147
148
149inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
150 return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
151}
152
153
154// Generate a MemOperand for storing arguments 5..N on the stack
155// when calling CallCFunction().
156// TODO(plind): Currently ONLY used for O32. Should be fixed for
157// n64, and used in RegExp code, and other places
158// with more than 8 arguments.
159inline MemOperand CFunctionArgumentOperand(int index) {
160 DCHECK(index > kCArgSlotCount);
161 // Argument 5 takes the slot just past the four Arg-slots.
162 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
163 return MemOperand(sp, offset);
164}
165
166
167// MacroAssembler implements a collection of frequently used macros.
168class MacroAssembler: public Assembler {
169 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000170 MacroAssembler(Isolate* isolate, void* buffer, int size,
171 CodeObjectRequired create_code_object);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000172
173 // Arguments macros.
174#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
175#define COND_ARGS cond, r1, r2
176
177 // Cases when relocation is not needed.
178#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
179 void Name(target_type target, BranchDelaySlot bd = PROTECT); \
180 inline void Name(BranchDelaySlot bd, target_type target) { \
181 Name(target, bd); \
182 } \
183 void Name(target_type target, \
184 COND_TYPED_ARGS, \
185 BranchDelaySlot bd = PROTECT); \
186 inline void Name(BranchDelaySlot bd, \
187 target_type target, \
188 COND_TYPED_ARGS) { \
189 Name(target, COND_ARGS, bd); \
190 }
191
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000192#define DECLARE_BRANCH_PROTOTYPES(Name) \
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000193 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000194 DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000195
196 DECLARE_BRANCH_PROTOTYPES(Branch)
197 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
198 DECLARE_BRANCH_PROTOTYPES(BranchShort)
199
200#undef DECLARE_BRANCH_PROTOTYPES
201#undef COND_TYPED_ARGS
202#undef COND_ARGS
203
204
205 // Jump, Call, and Ret pseudo instructions implementing inter-working.
206#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
207 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
208
209 void Jump(Register target, COND_ARGS);
210 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
211 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
212 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
213 static int CallSize(Register target, COND_ARGS);
214 void Call(Register target, COND_ARGS);
215 static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
216 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
217 int CallSize(Handle<Code> code,
218 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
219 TypeFeedbackId ast_id = TypeFeedbackId::None(),
220 COND_ARGS);
221 void Call(Handle<Code> code,
222 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
223 TypeFeedbackId ast_id = TypeFeedbackId::None(),
224 COND_ARGS);
225 void Ret(COND_ARGS);
226 inline void Ret(BranchDelaySlot bd, Condition cond = al,
227 Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
228 Ret(cond, rs, rt, bd);
229 }
230
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000231 bool IsNear(Label* L, Condition cond, int rs_reg);
232
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000233 void Branch(Label* L,
234 Condition cond,
235 Register rs,
236 Heap::RootListIndex index,
237 BranchDelaySlot bdslot = PROTECT);
238
Ben Murdoch097c5b22016-05-18 11:27:45 +0100239 // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
240 // functor/function with 'Label *func(size_t index)' declaration.
241 template <typename Func>
242 void GenerateSwitchTable(Register index, size_t case_count,
243 Func GetLabelFunction);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000244#undef COND_ARGS
245
246 // Emit code to discard a non-negative number of pointer-sized elements
247 // from the stack, clobbering only the sp register.
248 void Drop(int count,
249 Condition cond = cc_always,
250 Register reg = no_reg,
251 const Operand& op = Operand(no_reg));
252
253 // Trivial case of DropAndRet that utilizes the delay slot and only emits
254 // 2 instructions.
255 void DropAndRet(int drop);
256
257 void DropAndRet(int drop,
258 Condition cond,
259 Register reg,
260 const Operand& op);
261
262 // Swap two registers. If the scratch register is omitted then a slightly
263 // less efficient form using xor instead of mov is emitted.
264 void Swap(Register reg1, Register reg2, Register scratch = no_reg);
265
266 void Call(Label* target);
267
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000268 void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
269
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000270 inline void Move(Register dst, Register src) {
271 if (!dst.is(src)) {
272 mov(dst, src);
273 }
274 }
275
276 inline void Move(FPURegister dst, FPURegister src) {
277 if (!dst.is(src)) {
278 mov_d(dst, src);
279 }
280 }
281
282 inline void Move(Register dst_low, Register dst_high, FPURegister src) {
283 mfc1(dst_low, src);
284 mfhc1(dst_high, src);
285 }
286
287 inline void FmoveHigh(Register dst_high, FPURegister src) {
288 mfhc1(dst_high, src);
289 }
290
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000291 inline void FmoveHigh(FPURegister dst, Register src_high) {
292 mthc1(src_high, dst);
293 }
294
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000295 inline void FmoveLow(Register dst_low, FPURegister src) {
296 mfc1(dst_low, src);
297 }
298
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000299 void FmoveLow(FPURegister dst, Register src_low);
300
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000301 inline void Move(FPURegister dst, Register src_low, Register src_high) {
302 mtc1(src_low, dst);
303 mthc1(src_high, dst);
304 }
305
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400306 void Move(FPURegister dst, float imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000307 void Move(FPURegister dst, double imm);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400308
309 // Conditional move.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000310 void Movz(Register rd, Register rs, Register rt);
311 void Movn(Register rd, Register rs, Register rt);
312 void Movt(Register rd, Register rs, uint16_t cc = 0);
313 void Movf(Register rd, Register rs, uint16_t cc = 0);
314
315 void Clz(Register rd, Register rs);
316
317 // Jump unconditionally to given label.
318 // We NEED a nop in the branch delay slot, as it used by v8, for example in
319 // CodeGenerator::ProcessDeferred().
320 // Currently the branch delay slot is filled by the MacroAssembler.
321 // Use rather b(Label) for code generation.
322 void jmp(Label* L) {
323 Branch(L);
324 }
325
326 void Load(Register dst, const MemOperand& src, Representation r);
327 void Store(Register src, const MemOperand& dst, Representation r);
328
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000329 void PushRoot(Heap::RootListIndex index) {
330 LoadRoot(at, index);
331 Push(at);
332 }
333
334 // Compare the object in a register to a value and jump if they are equal.
335 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
336 LoadRoot(at, index);
337 Branch(if_equal, eq, with, Operand(at));
338 }
339
340 // Compare the object in a register to a value and jump if they are not equal.
341 void JumpIfNotRoot(Register with, Heap::RootListIndex index,
342 Label* if_not_equal) {
343 LoadRoot(at, index);
344 Branch(if_not_equal, ne, with, Operand(at));
345 }
346
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000347 // Load an object from the root table.
348 void LoadRoot(Register destination,
349 Heap::RootListIndex index);
350 void LoadRoot(Register destination,
351 Heap::RootListIndex index,
352 Condition cond, Register src1, const Operand& src2);
353
354 // Store an object to the root table.
355 void StoreRoot(Register source,
356 Heap::RootListIndex index);
357 void StoreRoot(Register source,
358 Heap::RootListIndex index,
359 Condition cond, Register src1, const Operand& src2);
360
361 // ---------------------------------------------------------------------------
362 // GC Support
363
364 void IncrementalMarkingRecordWriteHelper(Register object,
365 Register value,
366 Register address);
367
368 enum RememberedSetFinalAction {
369 kReturnAtEnd,
370 kFallThroughAtEnd
371 };
372
373
374 // Record in the remembered set the fact that we have a pointer to new space
375 // at the address pointed to by the addr register. Only works if addr is not
376 // in new space.
377 void RememberedSetHelper(Register object, // Used for debug code.
378 Register addr,
379 Register scratch,
380 SaveFPRegsMode save_fp,
381 RememberedSetFinalAction and_then);
382
383 void CheckPageFlag(Register object,
384 Register scratch,
385 int mask,
386 Condition cc,
387 Label* condition_met);
388
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000389 // Check if object is in new space. Jumps if the object is not in new space.
390 // The register scratch can be object itself, but it will be clobbered.
391 void JumpIfNotInNewSpace(Register object,
392 Register scratch,
393 Label* branch) {
Ben Murdoch097c5b22016-05-18 11:27:45 +0100394 InNewSpace(object, scratch, eq, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000395 }
396
397 // Check if object is in new space. Jumps if the object is in new space.
398 // The register scratch can be object itself, but scratch will be clobbered.
399 void JumpIfInNewSpace(Register object,
400 Register scratch,
401 Label* branch) {
Ben Murdoch097c5b22016-05-18 11:27:45 +0100402 InNewSpace(object, scratch, ne, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000403 }
404
405 // Check if an object has a given incremental marking color.
406 void HasColor(Register object,
407 Register scratch0,
408 Register scratch1,
409 Label* has_color,
410 int first_bit,
411 int second_bit);
412
413 void JumpIfBlack(Register object,
414 Register scratch0,
415 Register scratch1,
416 Label* on_black);
417
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000418 // Checks the color of an object. If the object is white we jump to the
419 // incremental marker.
420 void JumpIfWhite(Register value, Register scratch1, Register scratch2,
421 Register scratch3, Label* value_is_white);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000422
423 // Notify the garbage collector that we wrote a pointer into an object.
424 // |object| is the object being stored into, |value| is the object being
425 // stored. value and scratch registers are clobbered by the operation.
426 // The offset is the offset from the start of the object, not the offset from
427 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
428 void RecordWriteField(
429 Register object,
430 int offset,
431 Register value,
432 Register scratch,
433 RAStatus ra_status,
434 SaveFPRegsMode save_fp,
435 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
436 SmiCheck smi_check = INLINE_SMI_CHECK,
437 PointersToHereCheck pointers_to_here_check_for_value =
438 kPointersToHereMaybeInteresting);
439
440 // As above, but the offset has the tag presubtracted. For use with
441 // MemOperand(reg, off).
442 inline void RecordWriteContextSlot(
443 Register context,
444 int offset,
445 Register value,
446 Register scratch,
447 RAStatus ra_status,
448 SaveFPRegsMode save_fp,
449 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
450 SmiCheck smi_check = INLINE_SMI_CHECK,
451 PointersToHereCheck pointers_to_here_check_for_value =
452 kPointersToHereMaybeInteresting) {
453 RecordWriteField(context,
454 offset + kHeapObjectTag,
455 value,
456 scratch,
457 ra_status,
458 save_fp,
459 remembered_set_action,
460 smi_check,
461 pointers_to_here_check_for_value);
462 }
463
Ben Murdoch097c5b22016-05-18 11:27:45 +0100464 // Notify the garbage collector that we wrote a code entry into a
465 // JSFunction. Only scratch is clobbered by the operation.
466 void RecordWriteCodeEntryField(Register js_function, Register code_entry,
467 Register scratch);
468
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000469 void RecordWriteForMap(
470 Register object,
471 Register map,
472 Register dst,
473 RAStatus ra_status,
474 SaveFPRegsMode save_fp);
475
476 // For a given |object| notify the garbage collector that the slot |address|
477 // has been written. |value| is the object being stored. The value and
478 // address registers are clobbered by the operation.
479 void RecordWrite(
480 Register object,
481 Register address,
482 Register value,
483 RAStatus ra_status,
484 SaveFPRegsMode save_fp,
485 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
486 SmiCheck smi_check = INLINE_SMI_CHECK,
487 PointersToHereCheck pointers_to_here_check_for_value =
488 kPointersToHereMaybeInteresting);
489
490
491 // ---------------------------------------------------------------------------
492 // Inline caching support.
493
494 // Generate code for checking access rights - used for security checks
495 // on access to global objects across environments. The holder register
496 // is left untouched, whereas both scratch registers are clobbered.
497 void CheckAccessGlobalProxy(Register holder_reg,
498 Register scratch,
499 Label* miss);
500
501 void GetNumberHash(Register reg0, Register scratch);
502
503 void LoadFromNumberDictionary(Label* miss,
504 Register elements,
505 Register key,
506 Register result,
507 Register reg0,
508 Register reg1,
509 Register reg2);
510
511
512 inline void MarkCode(NopMarkerTypes type) {
513 nop(type);
514 }
515
516 // Check if the given instruction is a 'type' marker.
517 // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
518 // nop(type)). These instructions are generated to mark special location in
519 // the code, like some special IC code.
520 static inline bool IsMarkedCode(Instr instr, int type) {
521 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
522 return IsNop(instr, type);
523 }
524
525
526 static inline int GetCodeMarker(Instr instr) {
527 uint32_t opcode = ((instr & kOpcodeMask));
528 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
529 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
530 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
531
532 // Return <n> if we have a sll zero_reg, zero_reg, n
533 // else return -1.
534 bool sllzz = (opcode == SLL &&
535 rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
536 rs == static_cast<uint32_t>(ToNumber(zero_reg)));
537 int type =
538 (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
539 DCHECK((type == -1) ||
540 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
541 return type;
542 }
543
544
545
546 // ---------------------------------------------------------------------------
547 // Allocation support.
548
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000549 // Allocate an object in new space or old space. The object_size is
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000550 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
551 // is passed. If the space is exhausted control continues at the gc_required
552 // label. The allocated object is returned in result. If the flag
553 // tag_allocated_object is true the result is tagged as as a heap object.
554 // All registers are clobbered also when control continues at the gc_required
555 // label.
556 void Allocate(int object_size,
557 Register result,
558 Register scratch1,
559 Register scratch2,
560 Label* gc_required,
561 AllocationFlags flags);
562
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000563 void Allocate(Register object_size, Register result, Register result_end,
564 Register scratch, Label* gc_required, AllocationFlags flags);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000565
566 void AllocateTwoByteString(Register result,
567 Register length,
568 Register scratch1,
569 Register scratch2,
570 Register scratch3,
571 Label* gc_required);
572 void AllocateOneByteString(Register result, Register length,
573 Register scratch1, Register scratch2,
574 Register scratch3, Label* gc_required);
575 void AllocateTwoByteConsString(Register result,
576 Register length,
577 Register scratch1,
578 Register scratch2,
579 Label* gc_required);
580 void AllocateOneByteConsString(Register result, Register length,
581 Register scratch1, Register scratch2,
582 Label* gc_required);
583 void AllocateTwoByteSlicedString(Register result,
584 Register length,
585 Register scratch1,
586 Register scratch2,
587 Label* gc_required);
588 void AllocateOneByteSlicedString(Register result, Register length,
589 Register scratch1, Register scratch2,
590 Label* gc_required);
591
592 // Allocates a heap number or jumps to the gc_required label if the young
593 // space is full and a scavenge is needed. All registers are clobbered also
594 // when control continues at the gc_required label.
595 void AllocateHeapNumber(Register result,
596 Register scratch1,
597 Register scratch2,
598 Register heap_number_map,
599 Label* gc_required,
600 TaggingMode tagging_mode = TAG_RESULT,
601 MutableMode mode = IMMUTABLE);
602
603 void AllocateHeapNumberWithValue(Register result,
604 FPURegister value,
605 Register scratch1,
606 Register scratch2,
607 Label* gc_required);
608
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000609 // Allocate and initialize a JSValue wrapper with the specified {constructor}
610 // and {value}.
611 void AllocateJSValue(Register result, Register constructor, Register value,
612 Register scratch1, Register scratch2,
613 Label* gc_required);
614
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000615 // ---------------------------------------------------------------------------
616 // Instruction macros.
617
618#define DEFINE_INSTRUCTION(instr) \
619 void instr(Register rd, Register rs, const Operand& rt); \
620 void instr(Register rd, Register rs, Register rt) { \
621 instr(rd, rs, Operand(rt)); \
622 } \
623 void instr(Register rs, Register rt, int32_t j) { \
624 instr(rs, rt, Operand(j)); \
625 }
626
627#define DEFINE_INSTRUCTION2(instr) \
628 void instr(Register rs, const Operand& rt); \
629 void instr(Register rs, Register rt) { \
630 instr(rs, Operand(rt)); \
631 } \
632 void instr(Register rs, int32_t j) { \
633 instr(rs, Operand(j)); \
634 }
635
636 DEFINE_INSTRUCTION(Addu);
637 DEFINE_INSTRUCTION(Daddu);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400638 DEFINE_INSTRUCTION(Div);
639 DEFINE_INSTRUCTION(Divu);
640 DEFINE_INSTRUCTION(Ddivu);
641 DEFINE_INSTRUCTION(Mod);
642 DEFINE_INSTRUCTION(Modu);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000643 DEFINE_INSTRUCTION(Ddiv);
644 DEFINE_INSTRUCTION(Subu);
645 DEFINE_INSTRUCTION(Dsubu);
646 DEFINE_INSTRUCTION(Dmod);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400647 DEFINE_INSTRUCTION(Dmodu);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000648 DEFINE_INSTRUCTION(Mul);
649 DEFINE_INSTRUCTION(Mulh);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400650 DEFINE_INSTRUCTION(Mulhu);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000651 DEFINE_INSTRUCTION(Dmul);
652 DEFINE_INSTRUCTION(Dmulh);
653 DEFINE_INSTRUCTION2(Mult);
654 DEFINE_INSTRUCTION2(Dmult);
655 DEFINE_INSTRUCTION2(Multu);
656 DEFINE_INSTRUCTION2(Dmultu);
657 DEFINE_INSTRUCTION2(Div);
658 DEFINE_INSTRUCTION2(Ddiv);
659 DEFINE_INSTRUCTION2(Divu);
660 DEFINE_INSTRUCTION2(Ddivu);
661
662 DEFINE_INSTRUCTION(And);
663 DEFINE_INSTRUCTION(Or);
664 DEFINE_INSTRUCTION(Xor);
665 DEFINE_INSTRUCTION(Nor);
666 DEFINE_INSTRUCTION2(Neg);
667
668 DEFINE_INSTRUCTION(Slt);
669 DEFINE_INSTRUCTION(Sltu);
670
671 // MIPS32 R2 instruction macro.
672 DEFINE_INSTRUCTION(Ror);
673 DEFINE_INSTRUCTION(Dror);
674
675#undef DEFINE_INSTRUCTION
676#undef DEFINE_INSTRUCTION2
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000677#undef DEFINE_INSTRUCTION3
678
679 void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
680 Register scratch = at);
681 void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
682 Register scratch = at);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000683
684 void Pref(int32_t hint, const MemOperand& rs);
685
686
687 // ---------------------------------------------------------------------------
688 // Pseudo-instructions.
689
690 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
691
692 void Ulw(Register rd, const MemOperand& rs);
693 void Usw(Register rd, const MemOperand& rs);
694 void Uld(Register rd, const MemOperand& rs, Register scratch = at);
695 void Usd(Register rd, const MemOperand& rs, Register scratch = at);
696
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000697 void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
698 void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
699
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000700 // Load int32 in the rd register.
701 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100702 inline bool LiLower32BitHelper(Register rd, Operand j);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000703 inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
704 li(rd, Operand(j), mode);
705 }
706 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
707
708 // Push multiple registers on the stack.
709 // Registers are saved in numerical order, with higher numbered registers
710 // saved in higher memory addresses.
711 void MultiPush(RegList regs);
712 void MultiPushReversed(RegList regs);
713
714 void MultiPushFPU(RegList regs);
715 void MultiPushReversedFPU(RegList regs);
716
717 void push(Register src) {
718 Daddu(sp, sp, Operand(-kPointerSize));
719 sd(src, MemOperand(sp, 0));
720 }
721 void Push(Register src) { push(src); }
722
723 // Push a handle.
724 void Push(Handle<Object> handle);
725 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
726
727 // Push two registers. Pushes leftmost register first (to highest address).
728 void Push(Register src1, Register src2) {
729 Dsubu(sp, sp, Operand(2 * kPointerSize));
730 sd(src1, MemOperand(sp, 1 * kPointerSize));
731 sd(src2, MemOperand(sp, 0 * kPointerSize));
732 }
733
734 // Push three registers. Pushes leftmost register first (to highest address).
735 void Push(Register src1, Register src2, Register src3) {
736 Dsubu(sp, sp, Operand(3 * kPointerSize));
737 sd(src1, MemOperand(sp, 2 * kPointerSize));
738 sd(src2, MemOperand(sp, 1 * kPointerSize));
739 sd(src3, MemOperand(sp, 0 * kPointerSize));
740 }
741
742 // Push four registers. Pushes leftmost register first (to highest address).
743 void Push(Register src1, Register src2, Register src3, Register src4) {
744 Dsubu(sp, sp, Operand(4 * kPointerSize));
745 sd(src1, MemOperand(sp, 3 * kPointerSize));
746 sd(src2, MemOperand(sp, 2 * kPointerSize));
747 sd(src3, MemOperand(sp, 1 * kPointerSize));
748 sd(src4, MemOperand(sp, 0 * kPointerSize));
749 }
750
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000751 // Push five registers. Pushes leftmost register first (to highest address).
752 void Push(Register src1, Register src2, Register src3, Register src4,
753 Register src5) {
754 Dsubu(sp, sp, Operand(5 * kPointerSize));
755 sd(src1, MemOperand(sp, 4 * kPointerSize));
756 sd(src2, MemOperand(sp, 3 * kPointerSize));
757 sd(src3, MemOperand(sp, 2 * kPointerSize));
758 sd(src4, MemOperand(sp, 1 * kPointerSize));
759 sd(src5, MemOperand(sp, 0 * kPointerSize));
760 }
761
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000762 void Push(Register src, Condition cond, Register tst1, Register tst2) {
763 // Since we don't have conditional execution we use a Branch.
764 Branch(3, cond, tst1, Operand(tst2));
765 Dsubu(sp, sp, Operand(kPointerSize));
766 sd(src, MemOperand(sp, 0));
767 }
768
769 void PushRegisterAsTwoSmis(Register src, Register scratch = at);
770 void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
771
772 // Pops multiple values from the stack and load them in the
773 // registers specified in regs. Pop order is the opposite as in MultiPush.
774 void MultiPop(RegList regs);
775 void MultiPopReversed(RegList regs);
776
777 void MultiPopFPU(RegList regs);
778 void MultiPopReversedFPU(RegList regs);
779
780 void pop(Register dst) {
781 ld(dst, MemOperand(sp, 0));
782 Daddu(sp, sp, Operand(kPointerSize));
783 }
784 void Pop(Register dst) { pop(dst); }
785
786 // Pop two registers. Pops rightmost register first (from lower address).
787 void Pop(Register src1, Register src2) {
788 DCHECK(!src1.is(src2));
789 ld(src2, MemOperand(sp, 0 * kPointerSize));
790 ld(src1, MemOperand(sp, 1 * kPointerSize));
791 Daddu(sp, sp, 2 * kPointerSize);
792 }
793
794 // Pop three registers. Pops rightmost register first (from lower address).
795 void Pop(Register src1, Register src2, Register src3) {
796 ld(src3, MemOperand(sp, 0 * kPointerSize));
797 ld(src2, MemOperand(sp, 1 * kPointerSize));
798 ld(src1, MemOperand(sp, 2 * kPointerSize));
799 Daddu(sp, sp, 3 * kPointerSize);
800 }
801
802 void Pop(uint32_t count = 1) {
803 Daddu(sp, sp, Operand(count * kPointerSize));
804 }
805
806 // Push and pop the registers that can hold pointers, as defined by the
807 // RegList constant kSafepointSavedRegisters.
808 void PushSafepointRegisters();
809 void PopSafepointRegisters();
810 // Store value in register src in the safepoint stack slot for
811 // register dst.
812 void StoreToSafepointRegisterSlot(Register src, Register dst);
813 // Load the value of the src register from its safepoint stack slot
814 // into register dst.
815 void LoadFromSafepointRegisterSlot(Register dst, Register src);
816
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000817 // MIPS64 R2 instruction macro.
818 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000819 void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000820 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400821 void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000822 void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
823 void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000824
825 // ---------------------------------------------------------------------------
826 // FPU macros. These do not handle special cases like NaN or +- inf.
827
828 // Convert unsigned word to double.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000829 void Cvt_d_uw(FPURegister fd, FPURegister fs);
830 void Cvt_d_uw(FPURegister fd, Register rs);
831
832 // Convert unsigned long to double.
833 void Cvt_d_ul(FPURegister fd, FPURegister fs);
834 void Cvt_d_ul(FPURegister fd, Register rs);
835
Ben Murdoch097c5b22016-05-18 11:27:45 +0100836 // Convert unsigned word to float.
837 void Cvt_s_uw(FPURegister fd, FPURegister fs);
838 void Cvt_s_uw(FPURegister fd, Register rs);
839
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000840 // Convert unsigned long to float.
841 void Cvt_s_ul(FPURegister fd, FPURegister fs);
842 void Cvt_s_ul(FPURegister fd, Register rs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000843
844 // Convert double to unsigned long.
845 void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
846
847 void Trunc_l_d(FPURegister fd, FPURegister fs);
848 void Round_l_d(FPURegister fd, FPURegister fs);
849 void Floor_l_d(FPURegister fd, FPURegister fs);
850 void Ceil_l_d(FPURegister fd, FPURegister fs);
851
852 // Convert double to unsigned word.
853 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
854 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
855
Ben Murdoch097c5b22016-05-18 11:27:45 +0100856 // Convert single to unsigned word.
857 void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
858 void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
859
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000860 // Convert double to unsigned long.
861 void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
862 Register result = no_reg);
863 void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
864 Register result = no_reg);
865
866 // Convert single to unsigned long.
867 void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
868 Register result = no_reg);
869 void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
870 Register result = no_reg);
871
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000872 void Trunc_w_d(FPURegister fd, FPURegister fs);
873 void Round_w_d(FPURegister fd, FPURegister fs);
874 void Floor_w_d(FPURegister fd, FPURegister fs);
875 void Ceil_w_d(FPURegister fd, FPURegister fs);
876
877 void Madd_d(FPURegister fd,
878 FPURegister fr,
879 FPURegister fs,
880 FPURegister ft,
881 FPURegister scratch);
882
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000883 // Wrapper functions for the different cmp/branch types.
  // Single-precision compare-and-branch; NaN operands divert to |nan|.
  inline void BranchF32(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
  }
889
  // Double-precision compare-and-branch; NaN operands divert to |nan|.
  inline void BranchF64(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
  }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000895
896 // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    // Delay-slot-first overload; forwards to the primary BranchF64.
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }
901
  inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    // Delay-slot-first overload; forwards to the primary BranchF32.
    BranchF32(target, nan, cc, cmp1, cmp2, bd);
  }
906
907 // Alias functions for backward compatibility.
  inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
                      FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
    // Legacy alias: un-suffixed BranchF means the double-precision compare.
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }
912
  inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
                      Condition cc, FPURegister cmp1, FPURegister cmp2) {
    // Legacy delay-slot-first alias for the double-precision compare.
    BranchF64(bd, target, nan, cc, cmp1, cmp2);
  }
917
918 // Truncates a double using a specific rounding mode, and writes the value
919 // to the result register.
920 // The except_flag will contain any exceptions caused by the instruction.
921 // If check_inexact is kDontCheckForInexactConversion, then the inexact
922 // exception is masked.
923 void EmitFPUTruncate(FPURoundingMode rounding_mode,
924 Register result,
925 DoubleRegister double_input,
926 Register scratch,
927 DoubleRegister double_scratch,
928 Register except_flag,
929 CheckForInexactConversion check_inexact
930 = kDontCheckForInexactConversion);
931
932 // Performs a truncating conversion of a floating point number as used by
933 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
934 // succeeds, otherwise falls through if result is saturated. On return
935 // 'result' either holds answer, or is clobbered on fall through.
936 //
937 // Only public for the test code in test-code-stubs-arm.cc.
938 void TryInlineTruncateDoubleToI(Register result,
939 DoubleRegister input,
940 Label* done);
941
942 // Performs a truncating conversion of a floating point number as used by
943 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
944 // Exits with 'result' holding the answer.
945 void TruncateDoubleToI(Register result, DoubleRegister double_input);
946
947 // Performs a truncating conversion of a heap number as used by
948 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
949 // must be different registers. Exits with 'result' holding the answer.
950 void TruncateHeapNumberToI(Register result, Register object);
951
952 // Converts the smi or heap number in object to an int32 using the rules
953 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
954 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
955 // different registers.
956 void TruncateNumberToI(Register object,
957 Register result,
958 Register heap_number_map,
959 Register scratch,
960 Label* not_int32);
961
962 // Loads the number from object into dst register.
963 // If |object| is neither smi nor heap number, |not_number| is jumped to
964 // with |object| still intact.
965 void LoadNumber(Register object,
966 FPURegister dst,
967 Register heap_number_map,
968 Register scratch,
969 Label* not_number);
970
971 // Loads the number from object into double_dst in the double format.
972 // Control will jump to not_int32 if the value cannot be exactly represented
973 // by a 32-bit integer.
974 // Floating point value in the 32-bit integer range that are not exact integer
975 // won't be loaded.
976 void LoadNumberAsInt32Double(Register object,
977 DoubleRegister double_dst,
978 Register heap_number_map,
979 Register scratch1,
980 Register scratch2,
981 FPURegister double_scratch,
982 Label* not_int32);
983
984 // Loads the number from object into dst as a 32-bit integer.
985 // Control will jump to not_int32 if the object cannot be exactly represented
986 // by a 32-bit integer.
987 // Floating point value in the 32-bit integer range that are not exact integer
988 // won't be converted.
989 void LoadNumberAsInt32(Register object,
990 Register dst,
991 Register heap_number_map,
992 Register scratch1,
993 Register scratch2,
994 FPURegister double_scratch0,
995 FPURegister double_scratch1,
996 Label* not_int32);
997
998 // Enter exit frame.
999 // argc - argument count to be dropped by LeaveExitFrame.
1000 // save_doubles - saves FPU registers on stack, currently disabled.
1001 // stack_space - extra stack space.
1002 void EnterExitFrame(bool save_doubles,
1003 int stack_space = 0);
1004
1005 // Leave the current exit frame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001006 void LeaveExitFrame(bool save_doubles, Register arg_count,
1007 bool restore_context, bool do_return = NO_EMIT_RETURN,
1008 bool argument_count_is_length = false);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001009
1010 // Get the actual activation frame alignment for target environment.
1011 static int ActivationFrameAlignment();
1012
1013 // Make sure the stack is aligned. Only emits code in debug mode.
1014 void AssertStackIsAligned();
1015
1016 void LoadContext(Register dst, int context_chain_length);
1017
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001018 // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    // The global object is stored in the native context's EXTENSION slot.
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }
1022
1023 // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    // The proxy lives in the native context's GLOBAL_PROXY slot.
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }
1027
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001028 // Conditionally load the cached Array transitioned map of type
1029 // transitioned_kind from the native context if the map in register
1030 // map_in_out is the cached Array map in the native context of
1031 // expected_kind.
1032 void LoadTransitionedArrayMapConditional(
1033 ElementsKind expected_kind,
1034 ElementsKind transitioned_kind,
1035 Register map_in_out,
1036 Register scratch,
1037 Label* no_map_match);
1038
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001039 void LoadNativeContextSlot(int index, Register dst);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001040
1041 // Load the initial map from the global function. The registers
1042 // function and map can be the same, function is then overwritten.
1043 void LoadGlobalFunctionInitialMap(Register function,
1044 Register map,
1045 Register scratch);
1046
  // Point kRootRegister at the isolate's roots array so root-relative
  // constants can be reached with short indexed loads.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }
1052
1053 // -------------------------------------------------------------------------
1054 // JavaScript invokes.
1055
1056 // Invoke the JavaScript function code by either calling or jumping.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001057 void InvokeFunctionCode(Register function, Register new_target,
1058 const ParameterCount& expected,
1059 const ParameterCount& actual, InvokeFlag flag,
1060 const CallWrapper& call_wrapper);
1061
1062 void FloodFunctionIfStepping(Register fun, Register new_target,
1063 const ParameterCount& expected,
1064 const ParameterCount& actual);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001065
1066 // Invoke the JavaScript function in the given register. Changes the
1067 // current context to the context in the function before invoking.
1068 void InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001069 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001070 const ParameterCount& actual,
1071 InvokeFlag flag,
1072 const CallWrapper& call_wrapper);
1073
1074 void InvokeFunction(Register function,
1075 const ParameterCount& expected,
1076 const ParameterCount& actual,
1077 InvokeFlag flag,
1078 const CallWrapper& call_wrapper);
1079
1080 void InvokeFunction(Handle<JSFunction> function,
1081 const ParameterCount& expected,
1082 const ParameterCount& actual,
1083 InvokeFlag flag,
1084 const CallWrapper& call_wrapper);
1085
1086
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001087 void IsObjectJSStringType(Register object,
1088 Register scratch,
1089 Label* fail);
1090
1091 void IsObjectNameType(Register object,
1092 Register scratch,
1093 Label* fail);
1094
1095 // -------------------------------------------------------------------------
1096 // Debugger Support.
1097
1098 void DebugBreak();
1099
1100 // -------------------------------------------------------------------------
1101 // Exception handling.
1102
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001103 // Push a new stack handler and link into stack handler chain.
1104 void PushStackHandler();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001105
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001106 // Unlink the stack handler on top of the stack from the stack handler chain.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001107 // Must preserve the result register.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001108 void PopStackHandler();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001109
1110 // Copies a number of bytes from src to dst. All registers are clobbered. On
1111 // exit src and dst will point to the place just after where the last byte was
1112 // read or written and length will be zero.
1113 void CopyBytes(Register src,
1114 Register dst,
1115 Register length,
1116 Register scratch);
1117
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001118 // Initialize fields with filler values. Fields starting at |current_address|
1119 // not including |end_address| are overwritten with the value in |filler|. At
1120 // the end the loop, |current_address| takes the value of |end_address|.
1121 void InitializeFieldsWithFiller(Register current_address,
1122 Register end_address, Register filler);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001123
1124 // -------------------------------------------------------------------------
1125 // Support functions.
1126
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001127 // Machine code version of Map::GetConstructor().
1128 // |temp| holds |result|'s map when done, and |temp2| its instance type.
1129 void GetMapConstructor(Register result, Register map, Register temp,
1130 Register temp2);
1131
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001132 // Try to get function prototype of a function and puts the value in
1133 // the result register. Checks that the function really is a
1134 // function and jumps to the miss label if the fast checks fail. The
1135 // function register will be untouched; the other registers may be
1136 // clobbered.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001137 void TryGetFunctionPrototype(Register function, Register result,
1138 Register scratch, Label* miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001139
1140 void GetObjectType(Register function,
1141 Register map,
1142 Register type_reg);
1143
  // Load the instance-type byte out of a map (note: takes the map register,
  // not the object itself).
  void GetInstanceType(Register object_map, Register object_instance_type) {
    lbu(object_instance_type,
        FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  }
1148
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001149 // Check if a map for a JSObject indicates that the object has fast elements.
1150 // Jump to the specified label if it does not.
1151 void CheckFastElements(Register map,
1152 Register scratch,
1153 Label* fail);
1154
1155 // Check if a map for a JSObject indicates that the object can have both smi
1156 // and HeapObject elements. Jump to the specified label if it does not.
1157 void CheckFastObjectElements(Register map,
1158 Register scratch,
1159 Label* fail);
1160
1161 // Check if a map for a JSObject indicates that the object has fast smi only
1162 // elements. Jump to the specified label if it does not.
1163 void CheckFastSmiElements(Register map,
1164 Register scratch,
1165 Label* fail);
1166
1167 // Check to see if maybe_number can be stored as a double in
1168 // FastDoubleElements. If it can, store it at the index specified by key in
1169 // the FastDoubleElements array elements. Otherwise jump to fail.
1170 void StoreNumberToDoubleElements(Register value_reg,
1171 Register key_reg,
1172 Register elements_reg,
1173 Register scratch1,
1174 Register scratch2,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001175 Label* fail,
1176 int elements_offset = 0);
1177
1178 // Compare an object's map with the specified map and its transitioned
1179 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1180 // "branch_to" if the result of the comparison is "cond". If multiple map
1181 // compares are required, the compare sequences branches to early_success.
1182 void CompareMapAndBranch(Register obj,
1183 Register scratch,
1184 Handle<Map> map,
1185 Label* early_success,
1186 Condition cond,
1187 Label* branch_to);
1188
1189 // As above, but the map of the object is already loaded into the register
1190 // which is preserved by the code generated.
1191 void CompareMapAndBranch(Register obj_map,
1192 Handle<Map> map,
1193 Label* early_success,
1194 Condition cond,
1195 Label* branch_to);
1196
1197 // Check if the map of an object is equal to a specified map and branch to
1198 // label if not. Skip the smi check if not required (object is known to be a
1199 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1200 // against maps that are ElementsKind transition maps of the specificed map.
1201 void CheckMap(Register obj,
1202 Register scratch,
1203 Handle<Map> map,
1204 Label* fail,
1205 SmiCheckType smi_check_type);
1206
1207
1208 void CheckMap(Register obj,
1209 Register scratch,
1210 Heap::RootListIndex index,
1211 Label* fail,
1212 SmiCheckType smi_check_type);
1213
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001214 // Check if the map of an object is equal to a specified weak map and branch
1215 // to a specified target if equal. Skip the smi check if not required
1216 // (object is known to be a heap object)
1217 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1218 Handle<WeakCell> cell, Handle<Code> success,
1219 SmiCheckType smi_check_type);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001220
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001221 // If the value is a NaN, canonicalize the value else, do nothing.
1222 void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
1223
1224
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001225 // Get value of the weak cell.
1226 void GetWeakValue(Register value, Handle<WeakCell> cell);
1227
1228 // Load the value of the weak cell in the value register. Branch to the
1229 // given miss label is the weak cell was cleared.
1230 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001231
1232 // Load and check the instance type of an object for being a string.
1233 // Loads the type into the second argument register.
1234 // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    // NOTE(review): |result| is not used by this implementation; it appears
    // to be kept for interface compatibility — confirm against callers.
    ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    // With kStringTag == 0, the masked value is zero exactly for strings,
    // so callers test |type| against zero_reg with the returned condition.
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }
1244
1245
1246 // Picks out an array index from the hash field.
1247 // Register use:
1248 // hash - holds the index's hash. Clobbered.
1249 // index - holds the overwritten index on exit.
1250 void IndexFromHash(Register hash, Register index);
1251
1252 // Get the number of least significant bits from a register.
1253 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1254 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
1255
1256 // Load the value of a number object into a FPU double register. If the
1257 // object is not a number a jump to the label not_number is performed
1258 // and the FPU double register is unchanged.
1259 void ObjectToDoubleFPURegister(
1260 Register object,
1261 FPURegister value,
1262 Register scratch1,
1263 Register scratch2,
1264 Register heap_number_map,
1265 Label* not_number,
1266 ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1267
1268 // Load the value of a smi object into a FPU double register. The register
1269 // scratch1 can be the same register as smi in which case smi will hold the
1270 // untagged value afterwards.
1271 void SmiToDoubleFPURegister(Register smi,
1272 FPURegister value,
1273 Register scratch1);
1274
1275 // -------------------------------------------------------------------------
1276 // Overflow handling functions.
1277 // Usage: first call the appropriate arithmetic function, then call one of the
1278 // jump functions with the overflow_dst register as the second parameter.
1279
1280 void AdduAndCheckForOverflow(Register dst,
1281 Register left,
1282 Register right,
1283 Register overflow_dst,
1284 Register scratch = at);
1285
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001286 void AdduAndCheckForOverflow(Register dst, Register left,
1287 const Operand& right, Register overflow_dst,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001288 Register scratch);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001289
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001290 void SubuAndCheckForOverflow(Register dst,
1291 Register left,
1292 Register right,
1293 Register overflow_dst,
1294 Register scratch = at);
1295
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001296 void SubuAndCheckForOverflow(Register dst, Register left,
1297 const Operand& right, Register overflow_dst,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001298 Register scratch);
1299
1300 void DadduAndCheckForOverflow(Register dst, Register left, Register right,
1301 Register overflow_dst, Register scratch = at);
1302
1303 void DadduAndCheckForOverflow(Register dst, Register left,
1304 const Operand& right, Register overflow_dst,
1305 Register scratch);
1306
  inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
                            Label* overflow_label, Register scratch = at) {
    // Branch only on overflow; fall through when no overflow occurs.
    DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }
1311
  inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
                              Label* no_overflow_label, Register scratch = at) {
    // Branch only when no overflow occurs; fall through on overflow.
    DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }
1316
1317 void DaddBranchOvf(Register dst, Register left, const Operand& right,
1318 Label* overflow_label, Label* no_overflow_label,
1319 Register scratch = at);
1320
1321 void DaddBranchOvf(Register dst, Register left, Register right,
1322 Label* overflow_label, Label* no_overflow_label,
1323 Register scratch = at);
1324
1325 void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
1326 Register overflow_dst, Register scratch = at);
1327
1328 void DsubuAndCheckForOverflow(Register dst, Register left,
1329 const Operand& right, Register overflow_dst,
1330 Register scratch);
1331
  inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
                            Label* overflow_label, Register scratch = at) {
    // Branch only on overflow; fall through when no overflow occurs.
    DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }
1336
  inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
                              Label* no_overflow_label, Register scratch = at) {
    // Branch only when no overflow occurs; fall through on overflow.
    DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }
1341
1342 void DsubBranchOvf(Register dst, Register left, const Operand& right,
1343 Label* overflow_label, Label* no_overflow_label,
1344 Register scratch = at);
1345
1346 void DsubBranchOvf(Register dst, Register left, Register right,
1347 Label* overflow_label, Label* no_overflow_label,
1348 Register scratch = at);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001349
  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    // The *AndCheckForOverflow helpers signal overflow with a negative value
    // in overflow_check, hence the lt-against-zero test.
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }
1355
  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    // Inverse of BranchOnOverflow: non-negative overflow_check means no
    // overflow occurred.
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }
1361
  // Return from the current function if overflow_check is negative (overflow).
  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }
1365
  // Return from the current function if overflow_check is non-negative.
  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
1369
1370 // -------------------------------------------------------------------------
1371 // Runtime calls.
1372
1373 // See comments at the beginning of CEntryStub::Generate.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001374 inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001375
  // a1 carries the target function reference into CEntryStub.
  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }
1379
1380#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
1381const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
1382
1383 // Call a code stub.
1384 void CallStub(CodeStub* stub,
1385 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1386 COND_ARGS);
1387
1388 // Tail call a code stub (jump).
1389 void TailCallStub(CodeStub* stub, COND_ARGS);
1390
1391#undef COND_ARGS
1392
1393 void CallJSExitStub(CodeStub* stub);
1394
1395 // Call a runtime routine.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001396 void CallRuntime(const Runtime::Function* f, int num_arguments,
1397 SaveFPRegsMode save_doubles = kDontSaveFPRegs,
1398 BranchDelaySlot bd = PROTECT);
  // Call the runtime function |fid| with its declared argument count,
  // saving FPU registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }
1403
1404 // Convenience function: Same as above, but takes the fid instead.
  // Convenience overload: look up the function for |fid| and call it with
  // its declared argument count.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles, bd);
  }
1411
1412 // Convenience function: Same as above, but takes the fid instead.
  // Convenience overload: as above, but with an explicit argument count
  // (for variadic runtime functions).
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles, bd);
  }
1418
1419 // Convenience function: call an external reference.
1420 void CallExternalReference(const ExternalReference& ext,
1421 int num_arguments,
1422 BranchDelaySlot bd = PROTECT);
1423
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001424 // Convenience function: tail call a runtime routine (jump).
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001425 void TailCallRuntime(Runtime::FunctionId fid);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001426
1427 int CalculateStackPassedWords(int num_reg_arguments,
1428 int num_double_arguments);
1429
1430 // Before calling a C-function from generated code, align arguments on stack
1431 // and add space for the four mips argument slots.
1432 // After aligning the frame, non-register arguments must be stored on the
1433 // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1434 // The argument count assumes all arguments are word sized.
1435 // Some compilers/platforms require the stack to be aligned when calling
1436 // C++ code.
1437 // Needs a scratch register to do some arithmetic. This register will be
1438 // trashed.
1439 void PrepareCallCFunction(int num_reg_arguments,
1440 int num_double_registers,
1441 Register scratch);
1442 void PrepareCallCFunction(int num_reg_arguments,
1443 Register scratch);
1444
1445 // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1446 // Arguments 5..n are stored to stack using following:
1447 // sw(a4, CFunctionArgumentOperand(5));
1448
1449 // Calls a C function and cleans up the space for arguments allocated
1450 // by PrepareCallCFunction. The called function is not allowed to trigger a
1451 // garbage collection, since that might move the code and invalidate the
1452 // return address (unless this is somehow accounted for by the called
1453 // function).
1454 void CallCFunction(ExternalReference function, int num_arguments);
1455 void CallCFunction(Register function, int num_arguments);
1456 void CallCFunction(ExternalReference function,
1457 int num_reg_arguments,
1458 int num_double_arguments);
1459 void CallCFunction(Register function,
1460 int num_reg_arguments,
1461 int num_double_arguments);
1462 void MovFromFloatResult(DoubleRegister dst);
1463 void MovFromFloatParameter(DoubleRegister dst);
1464
1465 // There are two ways of passing double arguments on MIPS, depending on
1466 // whether soft or hard floating point ABI is used. These functions
1467 // abstract parameter passing for the three different ways we call
1468 // C functions from generated code.
1469 void MovToFloatParameter(DoubleRegister src);
1470 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1471 void MovToFloatResult(DoubleRegister src);
1472
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001473 // Jump to the builtin routine.
1474 void JumpToExternalReference(const ExternalReference& builtin,
1475 BranchDelaySlot bd = PROTECT);
1476
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001477 struct Unresolved {
1478 int pc;
1479 uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
1480 const char* name;
1481 };
1482
  // Handle to the code object under construction; must not be null when read.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }
1487
1488 // Emit code for a truncating division by a constant. The dividend register is
1489 // unchanged and at gets clobbered. Dividend and result must be different.
1490 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1491
1492 // -------------------------------------------------------------------------
1493 // StatsCounter support.
1494
1495 void SetCounter(StatsCounter* counter, int value,
1496 Register scratch1, Register scratch2);
1497 void IncrementCounter(StatsCounter* counter, int value,
1498 Register scratch1, Register scratch2);
1499 void DecrementCounter(StatsCounter* counter, int value,
1500 Register scratch1, Register scratch2);
1501
1502
1503 // -------------------------------------------------------------------------
1504 // Debugging.
1505
1506 // Calls Abort(msg) if the condition cc is not satisfied.
1507 // Use --debug_code to enable.
1508 void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
1509 void AssertFastElements(Register elements);
1510
1511 // Like Assert(), but always enabled.
1512 void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
1513
1514 // Print a message to stdout and abort execution.
1515 void Abort(BailoutReason msg);
1516
1517 // Verify restrictions about code generated in stubs.
1518 void set_generating_stub(bool value) { generating_stub_ = value; }
1519 bool generating_stub() { return generating_stub_; }
1520 void set_has_frame(bool value) { has_frame_ = value; }
1521 bool has_frame() { return has_frame_; }
1522 inline bool AllowThisStubCall(CodeStub* stub);
1523
1524 // ---------------------------------------------------------------------------
1525 // Number utilities.
1526
1527 // Check whether the value of reg is a power of two and not zero. If not
1528 // control continues at the label not_power_of_two. If reg is a power of two
1529 // the register scratch contains the value of (reg - 1) when control falls
1530 // through.
1531 void JumpIfNotPowerOfTwoOrZero(Register reg,
1532 Register scratch,
1533 Label* not_power_of_two_or_zero);
1534
1535 // -------------------------------------------------------------------------
1536 // Smi utilities.
1537
1538 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1539 void SmiTagCheckOverflow(Register reg, Register overflow);
1540 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1541
  void SmiTag(Register dst, Register src) {
    STATIC_ASSERT(kSmiTag == 0);
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      // 32-bit smi payload lives in the upper word: shift left by 32.
      dsll32(dst, src, 0);
    } else {
      // 31-bit smis: shift left by one, implemented as add-to-self.
      Addu(dst, src, src);
    }
  }
1551
  // In-place variant of SmiTag.
  void SmiTag(Register reg) {
    SmiTag(reg, reg);
  }
1555
1556 // Try to convert int32 to smi. If the value is to large, preserve
1557 // the original value and jump to not_a_smi. Destroys scratch and
1558 // sets flags.
1559 void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
1560 TrySmiTag(reg, reg, scratch, not_a_smi);
1561 }
1562
1563 void TrySmiTag(Register dst,
1564 Register src,
1565 Register scratch,
1566 Label* not_a_smi) {
1567 if (SmiValuesAre32Bits()) {
1568 SmiTag(dst, src);
1569 } else {
1570 SmiTagCheckOverflow(at, src, scratch);
1571 BranchOnOverflow(not_a_smi, scratch);
1572 mov(dst, at);
1573 }
1574 }
1575
1576 void SmiUntag(Register dst, Register src) {
1577 if (SmiValuesAre32Bits()) {
1578 STATIC_ASSERT(kSmiShift == 32);
1579 dsra32(dst, src, 0);
1580 } else {
1581 sra(dst, src, kSmiTagSize);
1582 }
1583 }
1584
  // Untag `reg` in place.
  void SmiUntag(Register reg) {
    SmiUntag(reg, reg);
  }
1588
1589 // Left-shifted from int32 equivalent of Smi.
1590 void SmiScale(Register dst, Register src, int scale) {
1591 if (SmiValuesAre32Bits()) {
1592 // The int portion is upper 32-bits of 64-bit word.
1593 dsra(dst, src, kSmiShift - scale);
1594 } else {
1595 DCHECK(scale >= kSmiTagSize);
1596 sll(dst, src, scale - kSmiTagSize);
1597 }
1598 }
1599
  // Combine load with untagging or scaling.
  void SmiLoadUntag(Register dst, MemOperand src);

  // Load a smi from src and scale it (see SmiScale) into dst.
  void SmiLoadScale(Register dst, MemOperand src, int scale);

  // Returns 2 values: the Smi and a scaled version of the int within the Smi.
  void SmiLoadWithScale(Register d_smi,
                        Register d_scaled,
                        MemOperand src,
                        int scale);

  // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
  void SmiLoadUntagWithScale(Register d_int,
                             Register d_scaled,
                             MemOperand src,
                             int scale);


  // Test if the register contains a smi: leaves the tag bits in `scratch`,
  // which is therefore zero iff `value` is a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  // Like SmiTst, but also keeps the sign bit, so `scratch` is zero only for
  // a non-negative smi.
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }
1625
  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  // Jump to on_not_heap_number unless object's map equals heap_number_map.
  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);
1686
  // -------------------------------------------------------------------------
  // String utilities.

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Jump to not_unique_name unless reg holds a unique-name instance type.
  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  // Debug check that (index, value) is a legal character store into the
  // sequential string `string` with the given encoding mask.
  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Clamp the value in input_reg to the uint8 range [0, 255].
  void ClampUint8(Register output_reg, Register input_reg);

  // Clamp a double to the uint8 range [0, 255].
  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  // Map / descriptor-array accessors; see the definitions in the .cc file.
  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001736
  // Extract the bitfield described by Field (kShift, kSize) from src into dst.
  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  // In-place variant of DecodeField.
  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
1746
  // Extract the bitfield described by Field from src and leave it smi-tagged
  // in dst. The final dsll32 places the value in the upper word, which
  // assumes 32-bit smis (no SmiValuesAre32Bits() branch here — TODO confirm
  // callers only use this in 32-bit-smi configurations).
  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift;
    dsrl(dst, src, shift);
    And(dst, dst, Operand(mask));
    dsll32(dst, dst, 0);
  }
1755
1756 template<typename Field>
1757 void DecodeFieldToSmi(Register reg) {
1758 DecodeField<Field>(reg, reg);
1759 }
  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, jump to allocation_memento_present;
  // otherwise control reaches no_memento_found.
  void TestJSArrayForAllocationMemento(
      Register receiver_reg,
      Register scratch_reg,
      Label* no_memento_found,
      Condition cond = al,
      Label* allocation_memento_present = NULL);
1788
1789 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1790 Register scratch_reg,
1791 Label* memento_found) {
1792 Label no_memento_found;
1793 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1794 &no_memento_found, eq, memento_found);
1795 bind(&no_memento_found);
1796 }
1797
  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // True once the double-zero register has been initialized (presumably by a
  // Move of 0.0 in the .cc file — TODO confirm where the flag is set).
  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
1803
 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);

  // Branch emission helpers. NOTE(review): the bool-returning variants appear
  // to report whether the branch fit the requested short encoding — confirm
  // against the definitions in the .cc file.
  void BranchShortHelperR6(int32_t offset, Label* L);
  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
                           Register rs, const Operand& rt);
  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
                         const Operand& rt, BranchDelaySlot bdslot);
  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
                        const Operand& rt, BranchDelaySlot bdslot);

  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
  void BranchAndLinkShortHelper(int16_t offset, Label* L,
                                BranchDelaySlot bdslot);
  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
                                  Register rs, const Operand& rt);
  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt,
                                BranchDelaySlot bdslot);
  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                               Register rs, const Operand& rt,
                               BranchDelaySlot bdslot);
  void BranchLong(Label* L, BranchDelaySlot bdslot);
  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
  void Jr(Label* L, BranchDelaySlot bdslot);
  void Jalr(Label* L, BranchDelaySlot bdslot);

  // Common implementation of BranchF functions for the different formats.
  void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2,
                     BranchDelaySlot bd = PROTECT);

  void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
                    FPURegister cmp1, FPURegister cmp2,
                    BranchDelaySlot bd = PROTECT);


  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // ne for new space, eq otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;            // See set_generating_stub().
  bool has_frame_;                  // See set_has_frame().
  bool has_double_zero_reg_set_;    // See IsDoubleZeroRegSet().
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
1889
1890
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  // Whether the instruction cache should be flushed for the patched region
  // when the patcher is destroyed.
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  // `instructions` is the exact number of instructions that must be emitted
  // before destruction.
  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
1926
Ben Murdoch097c5b22016-05-18 11:27:45 +01001927template <typename Func>
1928void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
1929 Func GetLabelFunction) {
1930 // Ensure that dd-ed labels following this instruction use 8 bytes aligned
1931 // addresses.
1932 if (kArchVariant >= kMips64r6) {
1933 BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
1934 // Opposite of Align(8) as we have odd number of instructions in this case.
1935 if ((pc_offset() & 7) == 0) {
1936 nop();
1937 }
1938 addiupc(at, 5);
1939 dlsa(at, at, index, kPointerSizeLog2);
1940 ld(at, MemOperand(at));
1941 } else {
1942 Label here;
1943 BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
1944 Align(8);
1945 bal(&here);
1946 dsll(at, index, kPointerSizeLog2); // Branch delay slot.
1947 bind(&here);
1948 daddu(at, at, ra);
1949 ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
1950 }
1951 jr(at);
1952 nop(); // Branch delay slot nop.
1953 for (size_t index = 0; index < case_count; ++index) {
1954 dd(GetLabelFunction(index));
1955 }
1956}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001957
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// In coverage builds, every macro-assembler access first emits a stop()
// marker carrying the C++ source location that generated the code.
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
1966
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001967} // namespace internal
1968} // namespace v8
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001969
1970#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_