blob: 576d30aae4e6561931c075bb10005af8a6aaa6ae [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
7
8#include "src/assembler.h"
9#include "src/globals.h"
10#include "src/mips64/assembler-mips64.h"
11
12namespace v8 {
13namespace internal {
14
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000015// Give alias names to registers for calling conventions.
16const Register kReturnRegister0 = {Register::kCode_v0};
17const Register kReturnRegister1 = {Register::kCode_v1};
Ben Murdoch097c5b22016-05-18 11:27:45 +010018const Register kReturnRegister2 = {Register::kCode_a0};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000019const Register kJSFunctionRegister = {Register::kCode_a1};
20const Register kContextRegister = {Register::kCpRegister};
Ben Murdochc5610432016-08-08 18:44:38 +010021const Register kAllocateSizeRegister = {Register::kCode_a0};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000022const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000023const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
24const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
25const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
26const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
27const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
28const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
29const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
30
// Forward declaration.
class JumpTarget;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function call
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
45
46
// Flags used for LeaveExitFrame function: whether the epilogue should also
// emit the return instruction.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};
52
// Flags used for AllocateHeapNumber: whether the result register receives a
// tagged HeapObject pointer or the raw (untagged) address.
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag
  DONT_TAG_RESULT
};
60
// Flags used for the ObjectToDoubleFPURegister function (bit flags, may be
// combined with bitwise OR).
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities, branch to the non number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};
70
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
// USE_DELAY_SLOT lets the following instruction fill the slot; PROTECT makes
// the assembler emit a nop in it.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};
76
// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than lui/ori/dsll
  // sequence.
  OPTIMIZE_SIZE = 0,
  // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
  // could be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1,
  // For address loads only 4 instruction are required. Used to mark
  // constant load that will be used as address without relocation
  // information. It ensures predictable code size, so specific sites
  // in code are patchable.
  ADDRESS_LOAD = 2
};
92
93
// Whether a write barrier should also update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a write barrier must itself test the stored value for Smi-ness.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether the pointed-to location can possibly be interesting to the GC.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Tracks whether the return address (ra) has been saved around a call.
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
101
102Register GetRegisterThatIsNotOneOf(Register reg1,
103 Register reg2 = no_reg,
104 Register reg3 = no_reg,
105 Register reg4 = no_reg,
106 Register reg5 = no_reg,
107 Register reg6 = no_reg);
108
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000109bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
110 Register reg4 = no_reg, Register reg5 = no_reg,
111 Register reg6 = no_reg, Register reg7 = no_reg,
112 Register reg8 = no_reg, Register reg9 = no_reg,
113 Register reg10 = no_reg);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000114
115
// -----------------------------------------------------------------------------
// Static helper functions.

// Byte offset of the 32-bit payload word within a Smi slot. Smis are shifted
// left by 32 bits, so on little-endian targets the payload occupies the upper
// half of the 64-bit word; on big-endian it is already at the base offset.
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
124
125
126inline MemOperand ContextMemOperand(Register context, int index) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000127 return MemOperand(context, Context::SlotOffset(index));
128}
129
130
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000131inline MemOperand NativeContextMemOperand() {
132 return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000133}
134
135
136// Generate a MemOperand for loading a field from an object.
137inline MemOperand FieldMemOperand(Register object, int offset) {
138 return MemOperand(object, offset - kHeapObjectTag);
139}
140
141
142inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000143 // Assumes that Smis are shifted by 32 bits.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000144 STATIC_ASSERT(kSmiShift == 32);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000145 return MemOperand(rm, SmiWordOffset(offset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000146}
147
148
149inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
150 return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
151}
152
153
154// Generate a MemOperand for storing arguments 5..N on the stack
155// when calling CallCFunction().
156// TODO(plind): Currently ONLY used for O32. Should be fixed for
157// n64, and used in RegExp code, and other places
158// with more than 8 arguments.
159inline MemOperand CFunctionArgumentOperand(int index) {
160 DCHECK(index > kCArgSlotCount);
161 // Argument 5 takes the slot just past the four Arg-slots.
162 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
163 return MemOperand(sp, offset);
164}
165
166
167// MacroAssembler implements a collection of frequently used macros.
168class MacroAssembler: public Assembler {
169 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000170 MacroAssembler(Isolate* isolate, void* buffer, int size,
171 CodeObjectRequired create_code_object);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000172
173 // Arguments macros.
174#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
175#define COND_ARGS cond, r1, r2
176
177 // Cases when relocation is not needed.
178#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
179 void Name(target_type target, BranchDelaySlot bd = PROTECT); \
180 inline void Name(BranchDelaySlot bd, target_type target) { \
181 Name(target, bd); \
182 } \
183 void Name(target_type target, \
184 COND_TYPED_ARGS, \
185 BranchDelaySlot bd = PROTECT); \
186 inline void Name(BranchDelaySlot bd, \
187 target_type target, \
188 COND_TYPED_ARGS) { \
189 Name(target, COND_ARGS, bd); \
190 }
191
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000192#define DECLARE_BRANCH_PROTOTYPES(Name) \
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000193 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000194 DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000195
196 DECLARE_BRANCH_PROTOTYPES(Branch)
197 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
198 DECLARE_BRANCH_PROTOTYPES(BranchShort)
199
200#undef DECLARE_BRANCH_PROTOTYPES
201#undef COND_TYPED_ARGS
202#undef COND_ARGS
203
204
205 // Jump, Call, and Ret pseudo instructions implementing inter-working.
206#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
207 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
208
209 void Jump(Register target, COND_ARGS);
210 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
211 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
212 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
213 static int CallSize(Register target, COND_ARGS);
214 void Call(Register target, COND_ARGS);
215 static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
216 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
217 int CallSize(Handle<Code> code,
218 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
219 TypeFeedbackId ast_id = TypeFeedbackId::None(),
220 COND_ARGS);
221 void Call(Handle<Code> code,
222 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
223 TypeFeedbackId ast_id = TypeFeedbackId::None(),
224 COND_ARGS);
225 void Ret(COND_ARGS);
226 inline void Ret(BranchDelaySlot bd, Condition cond = al,
227 Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
228 Ret(cond, rs, rt, bd);
229 }
230
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000231 bool IsNear(Label* L, Condition cond, int rs_reg);
232
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000233 void Branch(Label* L,
234 Condition cond,
235 Register rs,
236 Heap::RootListIndex index,
237 BranchDelaySlot bdslot = PROTECT);
238
Ben Murdoch097c5b22016-05-18 11:27:45 +0100239 // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
240 // functor/function with 'Label *func(size_t index)' declaration.
241 template <typename Func>
242 void GenerateSwitchTable(Register index, size_t case_count,
243 Func GetLabelFunction);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000244#undef COND_ARGS
245
246 // Emit code to discard a non-negative number of pointer-sized elements
247 // from the stack, clobbering only the sp register.
248 void Drop(int count,
249 Condition cond = cc_always,
250 Register reg = no_reg,
251 const Operand& op = Operand(no_reg));
252
253 // Trivial case of DropAndRet that utilizes the delay slot and only emits
254 // 2 instructions.
255 void DropAndRet(int drop);
256
257 void DropAndRet(int drop,
258 Condition cond,
259 Register reg,
260 const Operand& op);
261
262 // Swap two registers. If the scratch register is omitted then a slightly
263 // less efficient form using xor instead of mov is emitted.
264 void Swap(Register reg1, Register reg2, Register scratch = no_reg);
265
266 void Call(Label* target);
267
Ben Murdochda12d292016-06-02 14:46:10 +0100268 inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
269 inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000270
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000271 inline void Move(Register dst, Register src) {
272 if (!dst.is(src)) {
273 mov(dst, src);
274 }
275 }
276
Ben Murdochda12d292016-06-02 14:46:10 +0100277 inline void Move_d(FPURegister dst, FPURegister src) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000278 if (!dst.is(src)) {
279 mov_d(dst, src);
280 }
281 }
282
Ben Murdochda12d292016-06-02 14:46:10 +0100283 inline void Move_s(FPURegister dst, FPURegister src) {
284 if (!dst.is(src)) {
285 mov_s(dst, src);
286 }
287 }
288
289 inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
290
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000291 inline void Move(Register dst_low, Register dst_high, FPURegister src) {
292 mfc1(dst_low, src);
293 mfhc1(dst_high, src);
294 }
295
Ben Murdochda12d292016-06-02 14:46:10 +0100296 inline void Move(Register dst, FPURegister src) { dmfc1(dst, src); }
297
298 inline void Move(FPURegister dst, Register src) { dmtc1(src, dst); }
299
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000300 inline void FmoveHigh(Register dst_high, FPURegister src) {
301 mfhc1(dst_high, src);
302 }
303
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000304 inline void FmoveHigh(FPURegister dst, Register src_high) {
305 mthc1(src_high, dst);
306 }
307
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000308 inline void FmoveLow(Register dst_low, FPURegister src) {
309 mfc1(dst_low, src);
310 }
311
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000312 void FmoveLow(FPURegister dst, Register src_low);
313
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000314 inline void Move(FPURegister dst, Register src_low, Register src_high) {
315 mtc1(src_low, dst);
316 mthc1(src_high, dst);
317 }
318
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400319 void Move(FPURegister dst, float imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000320 void Move(FPURegister dst, double imm);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400321
322 // Conditional move.
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000323 void Movz(Register rd, Register rs, Register rt);
324 void Movn(Register rd, Register rs, Register rt);
325 void Movt(Register rd, Register rs, uint16_t cc = 0);
326 void Movf(Register rd, Register rs, uint16_t cc = 0);
327
Ben Murdochda12d292016-06-02 14:46:10 +0100328 // Min, Max macros.
329 // On pre-r6 these functions may modify at and t8 registers.
330 void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
331 Label* nan = nullptr);
332 void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
333 Label* nan = nullptr);
334 void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
335 Label* nan = nullptr);
336 void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
337 Label* nan = nullptr);
338
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000339 void Clz(Register rd, Register rs);
340
341 // Jump unconditionally to given label.
342 // We NEED a nop in the branch delay slot, as it used by v8, for example in
343 // CodeGenerator::ProcessDeferred().
344 // Currently the branch delay slot is filled by the MacroAssembler.
345 // Use rather b(Label) for code generation.
346 void jmp(Label* L) {
347 Branch(L);
348 }
349
350 void Load(Register dst, const MemOperand& src, Representation r);
351 void Store(Register src, const MemOperand& dst, Representation r);
352
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000353 void PushRoot(Heap::RootListIndex index) {
354 LoadRoot(at, index);
355 Push(at);
356 }
357
358 // Compare the object in a register to a value and jump if they are equal.
359 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
360 LoadRoot(at, index);
361 Branch(if_equal, eq, with, Operand(at));
362 }
363
364 // Compare the object in a register to a value and jump if they are not equal.
365 void JumpIfNotRoot(Register with, Heap::RootListIndex index,
366 Label* if_not_equal) {
367 LoadRoot(at, index);
368 Branch(if_not_equal, ne, with, Operand(at));
369 }
370
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000371 // Load an object from the root table.
372 void LoadRoot(Register destination,
373 Heap::RootListIndex index);
374 void LoadRoot(Register destination,
375 Heap::RootListIndex index,
376 Condition cond, Register src1, const Operand& src2);
377
378 // Store an object to the root table.
379 void StoreRoot(Register source,
380 Heap::RootListIndex index);
381 void StoreRoot(Register source,
382 Heap::RootListIndex index,
383 Condition cond, Register src1, const Operand& src2);
384
385 // ---------------------------------------------------------------------------
386 // GC Support
387
388 void IncrementalMarkingRecordWriteHelper(Register object,
389 Register value,
390 Register address);
391
392 enum RememberedSetFinalAction {
393 kReturnAtEnd,
394 kFallThroughAtEnd
395 };
396
397
398 // Record in the remembered set the fact that we have a pointer to new space
399 // at the address pointed to by the addr register. Only works if addr is not
400 // in new space.
401 void RememberedSetHelper(Register object, // Used for debug code.
402 Register addr,
403 Register scratch,
404 SaveFPRegsMode save_fp,
405 RememberedSetFinalAction and_then);
406
407 void CheckPageFlag(Register object,
408 Register scratch,
409 int mask,
410 Condition cc,
411 Label* condition_met);
412
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000413 // Check if object is in new space. Jumps if the object is not in new space.
414 // The register scratch can be object itself, but it will be clobbered.
415 void JumpIfNotInNewSpace(Register object,
416 Register scratch,
417 Label* branch) {
Ben Murdoch097c5b22016-05-18 11:27:45 +0100418 InNewSpace(object, scratch, eq, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000419 }
420
421 // Check if object is in new space. Jumps if the object is in new space.
422 // The register scratch can be object itself, but scratch will be clobbered.
423 void JumpIfInNewSpace(Register object,
424 Register scratch,
425 Label* branch) {
Ben Murdoch097c5b22016-05-18 11:27:45 +0100426 InNewSpace(object, scratch, ne, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000427 }
428
429 // Check if an object has a given incremental marking color.
430 void HasColor(Register object,
431 Register scratch0,
432 Register scratch1,
433 Label* has_color,
434 int first_bit,
435 int second_bit);
436
437 void JumpIfBlack(Register object,
438 Register scratch0,
439 Register scratch1,
440 Label* on_black);
441
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000442 // Checks the color of an object. If the object is white we jump to the
443 // incremental marker.
444 void JumpIfWhite(Register value, Register scratch1, Register scratch2,
445 Register scratch3, Label* value_is_white);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000446
447 // Notify the garbage collector that we wrote a pointer into an object.
448 // |object| is the object being stored into, |value| is the object being
449 // stored. value and scratch registers are clobbered by the operation.
450 // The offset is the offset from the start of the object, not the offset from
451 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
452 void RecordWriteField(
453 Register object,
454 int offset,
455 Register value,
456 Register scratch,
457 RAStatus ra_status,
458 SaveFPRegsMode save_fp,
459 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
460 SmiCheck smi_check = INLINE_SMI_CHECK,
461 PointersToHereCheck pointers_to_here_check_for_value =
462 kPointersToHereMaybeInteresting);
463
464 // As above, but the offset has the tag presubtracted. For use with
465 // MemOperand(reg, off).
466 inline void RecordWriteContextSlot(
467 Register context,
468 int offset,
469 Register value,
470 Register scratch,
471 RAStatus ra_status,
472 SaveFPRegsMode save_fp,
473 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
474 SmiCheck smi_check = INLINE_SMI_CHECK,
475 PointersToHereCheck pointers_to_here_check_for_value =
476 kPointersToHereMaybeInteresting) {
477 RecordWriteField(context,
478 offset + kHeapObjectTag,
479 value,
480 scratch,
481 ra_status,
482 save_fp,
483 remembered_set_action,
484 smi_check,
485 pointers_to_here_check_for_value);
486 }
487
Ben Murdoch097c5b22016-05-18 11:27:45 +0100488 // Notify the garbage collector that we wrote a code entry into a
489 // JSFunction. Only scratch is clobbered by the operation.
490 void RecordWriteCodeEntryField(Register js_function, Register code_entry,
491 Register scratch);
492
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000493 void RecordWriteForMap(
494 Register object,
495 Register map,
496 Register dst,
497 RAStatus ra_status,
498 SaveFPRegsMode save_fp);
499
500 // For a given |object| notify the garbage collector that the slot |address|
501 // has been written. |value| is the object being stored. The value and
502 // address registers are clobbered by the operation.
503 void RecordWrite(
504 Register object,
505 Register address,
506 Register value,
507 RAStatus ra_status,
508 SaveFPRegsMode save_fp,
509 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
510 SmiCheck smi_check = INLINE_SMI_CHECK,
511 PointersToHereCheck pointers_to_here_check_for_value =
512 kPointersToHereMaybeInteresting);
513
514
515 // ---------------------------------------------------------------------------
516 // Inline caching support.
517
518 // Generate code for checking access rights - used for security checks
519 // on access to global objects across environments. The holder register
520 // is left untouched, whereas both scratch registers are clobbered.
521 void CheckAccessGlobalProxy(Register holder_reg,
522 Register scratch,
523 Label* miss);
524
525 void GetNumberHash(Register reg0, Register scratch);
526
527 void LoadFromNumberDictionary(Label* miss,
528 Register elements,
529 Register key,
530 Register result,
531 Register reg0,
532 Register reg1,
533 Register reg2);
534
535
536 inline void MarkCode(NopMarkerTypes type) {
537 nop(type);
538 }
539
540 // Check if the given instruction is a 'type' marker.
541 // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
542 // nop(type)). These instructions are generated to mark special location in
543 // the code, like some special IC code.
544 static inline bool IsMarkedCode(Instr instr, int type) {
545 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
546 return IsNop(instr, type);
547 }
548
549
550 static inline int GetCodeMarker(Instr instr) {
551 uint32_t opcode = ((instr & kOpcodeMask));
552 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
553 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
554 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
555
556 // Return <n> if we have a sll zero_reg, zero_reg, n
557 // else return -1.
558 bool sllzz = (opcode == SLL &&
559 rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
560 rs == static_cast<uint32_t>(ToNumber(zero_reg)));
561 int type =
562 (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
563 DCHECK((type == -1) ||
564 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
565 return type;
566 }
567
568
569
570 // ---------------------------------------------------------------------------
571 // Allocation support.
572
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000573 // Allocate an object in new space or old space. The object_size is
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000574 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
575 // is passed. If the space is exhausted control continues at the gc_required
576 // label. The allocated object is returned in result. If the flag
577 // tag_allocated_object is true the result is tagged as as a heap object.
578 // All registers are clobbered also when control continues at the gc_required
579 // label.
580 void Allocate(int object_size,
581 Register result,
582 Register scratch1,
583 Register scratch2,
584 Label* gc_required,
585 AllocationFlags flags);
586
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000587 void Allocate(Register object_size, Register result, Register result_end,
588 Register scratch, Label* gc_required, AllocationFlags flags);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000589
Ben Murdochc5610432016-08-08 18:44:38 +0100590 // FastAllocate is right now only used for folded allocations. It just
591 // increments the top pointer without checking against limit. This can only
592 // be done if it was proved earlier that the allocation will succeed.
593 void FastAllocate(int object_size, Register result, Register scratch1,
594 Register scratch2, AllocationFlags flags);
595
596 void FastAllocate(Register object_size, Register result, Register result_new,
597 Register scratch, AllocationFlags flags);
598
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000599 void AllocateTwoByteString(Register result,
600 Register length,
601 Register scratch1,
602 Register scratch2,
603 Register scratch3,
604 Label* gc_required);
605 void AllocateOneByteString(Register result, Register length,
606 Register scratch1, Register scratch2,
607 Register scratch3, Label* gc_required);
608 void AllocateTwoByteConsString(Register result,
609 Register length,
610 Register scratch1,
611 Register scratch2,
612 Label* gc_required);
613 void AllocateOneByteConsString(Register result, Register length,
614 Register scratch1, Register scratch2,
615 Label* gc_required);
616 void AllocateTwoByteSlicedString(Register result,
617 Register length,
618 Register scratch1,
619 Register scratch2,
620 Label* gc_required);
621 void AllocateOneByteSlicedString(Register result, Register length,
622 Register scratch1, Register scratch2,
623 Label* gc_required);
624
625 // Allocates a heap number or jumps to the gc_required label if the young
626 // space is full and a scavenge is needed. All registers are clobbered also
627 // when control continues at the gc_required label.
628 void AllocateHeapNumber(Register result,
629 Register scratch1,
630 Register scratch2,
631 Register heap_number_map,
632 Label* gc_required,
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000633 MutableMode mode = IMMUTABLE);
634
635 void AllocateHeapNumberWithValue(Register result,
636 FPURegister value,
637 Register scratch1,
638 Register scratch2,
639 Label* gc_required);
640
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000641 // Allocate and initialize a JSValue wrapper with the specified {constructor}
642 // and {value}.
643 void AllocateJSValue(Register result, Register constructor, Register value,
644 Register scratch1, Register scratch2,
645 Label* gc_required);
646
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000647 // ---------------------------------------------------------------------------
648 // Instruction macros.
649
650#define DEFINE_INSTRUCTION(instr) \
651 void instr(Register rd, Register rs, const Operand& rt); \
652 void instr(Register rd, Register rs, Register rt) { \
653 instr(rd, rs, Operand(rt)); \
654 } \
655 void instr(Register rs, Register rt, int32_t j) { \
656 instr(rs, rt, Operand(j)); \
657 }
658
659#define DEFINE_INSTRUCTION2(instr) \
660 void instr(Register rs, const Operand& rt); \
661 void instr(Register rs, Register rt) { \
662 instr(rs, Operand(rt)); \
663 } \
664 void instr(Register rs, int32_t j) { \
665 instr(rs, Operand(j)); \
666 }
667
668 DEFINE_INSTRUCTION(Addu);
669 DEFINE_INSTRUCTION(Daddu);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400670 DEFINE_INSTRUCTION(Div);
671 DEFINE_INSTRUCTION(Divu);
672 DEFINE_INSTRUCTION(Ddivu);
673 DEFINE_INSTRUCTION(Mod);
674 DEFINE_INSTRUCTION(Modu);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000675 DEFINE_INSTRUCTION(Ddiv);
676 DEFINE_INSTRUCTION(Subu);
677 DEFINE_INSTRUCTION(Dsubu);
678 DEFINE_INSTRUCTION(Dmod);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400679 DEFINE_INSTRUCTION(Dmodu);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000680 DEFINE_INSTRUCTION(Mul);
681 DEFINE_INSTRUCTION(Mulh);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400682 DEFINE_INSTRUCTION(Mulhu);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000683 DEFINE_INSTRUCTION(Dmul);
684 DEFINE_INSTRUCTION(Dmulh);
685 DEFINE_INSTRUCTION2(Mult);
686 DEFINE_INSTRUCTION2(Dmult);
687 DEFINE_INSTRUCTION2(Multu);
688 DEFINE_INSTRUCTION2(Dmultu);
689 DEFINE_INSTRUCTION2(Div);
690 DEFINE_INSTRUCTION2(Ddiv);
691 DEFINE_INSTRUCTION2(Divu);
692 DEFINE_INSTRUCTION2(Ddivu);
693
694 DEFINE_INSTRUCTION(And);
695 DEFINE_INSTRUCTION(Or);
696 DEFINE_INSTRUCTION(Xor);
697 DEFINE_INSTRUCTION(Nor);
698 DEFINE_INSTRUCTION2(Neg);
699
700 DEFINE_INSTRUCTION(Slt);
701 DEFINE_INSTRUCTION(Sltu);
702
703 // MIPS32 R2 instruction macro.
704 DEFINE_INSTRUCTION(Ror);
705 DEFINE_INSTRUCTION(Dror);
706
707#undef DEFINE_INSTRUCTION
708#undef DEFINE_INSTRUCTION2
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000709#undef DEFINE_INSTRUCTION3
710
Ben Murdochda12d292016-06-02 14:46:10 +0100711 // Load Scaled Address instructions. Parameter sa (shift argument) must be
712 // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
713 // may be clobbered.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000714 void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
715 Register scratch = at);
716 void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
717 Register scratch = at);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000718
719 void Pref(int32_t hint, const MemOperand& rs);
720
721
722 // ---------------------------------------------------------------------------
723 // Pseudo-instructions.
724
Ben Murdoch61f157c2016-09-16 13:49:30 +0100725 // Change endianness
726 void ByteSwapSigned(Register reg, int operand_size);
727 void ByteSwapUnsigned(Register reg, int operand_size);
728
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000729 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
730
Ben Murdochc5610432016-08-08 18:44:38 +0100731 void Ulh(Register rd, const MemOperand& rs);
732 void Ulhu(Register rd, const MemOperand& rs);
733 void Ush(Register rd, const MemOperand& rs, Register scratch);
734
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000735 void Ulw(Register rd, const MemOperand& rs);
Ben Murdochc5610432016-08-08 18:44:38 +0100736 void Ulwu(Register rd, const MemOperand& rs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000737 void Usw(Register rd, const MemOperand& rs);
Ben Murdochc5610432016-08-08 18:44:38 +0100738
739 void Uld(Register rd, const MemOperand& rs);
740 void Usd(Register rd, const MemOperand& rs);
741
742 void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
743 void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
744
745 void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
746 void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000747
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000748 void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
749 void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
750
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000751 // Load int32 in the rd register.
752 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
Ben Murdoch097c5b22016-05-18 11:27:45 +0100753 inline bool LiLower32BitHelper(Register rd, Operand j);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000754 inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
755 li(rd, Operand(j), mode);
756 }
757 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
758
759 // Push multiple registers on the stack.
760 // Registers are saved in numerical order, with higher numbered registers
761 // saved in higher memory addresses.
762 void MultiPush(RegList regs);
763 void MultiPushReversed(RegList regs);
764
765 void MultiPushFPU(RegList regs);
766 void MultiPushReversedFPU(RegList regs);
767
768 void push(Register src) {
769 Daddu(sp, sp, Operand(-kPointerSize));
770 sd(src, MemOperand(sp, 0));
771 }
772 void Push(Register src) { push(src); }
773
774 // Push a handle.
775 void Push(Handle<Object> handle);
776 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
777
  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Dsubu(sp, sp, Operand(2 * kPointerSize));
    sd(src1, MemOperand(sp, 1 * kPointerSize));
    sd(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Dsubu(sp, sp, Operand(3 * kPointerSize));
    sd(src1, MemOperand(sp, 2 * kPointerSize));
    sd(src2, MemOperand(sp, 1 * kPointerSize));
    sd(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Dsubu(sp, sp, Operand(4 * kPointerSize));
    sd(src1, MemOperand(sp, 3 * kPointerSize));
    sd(src2, MemOperand(sp, 2 * kPointerSize));
    sd(src3, MemOperand(sp, 1 * kPointerSize));
    sd(src4, MemOperand(sp, 0 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Dsubu(sp, sp, Operand(5 * kPointerSize));
    sd(src1, MemOperand(sp, 4 * kPointerSize));
    sd(src2, MemOperand(sp, 3 * kPointerSize));
    sd(src3, MemOperand(sp, 2 * kPointerSize));
    sd(src4, MemOperand(sp, 1 * kPointerSize));
    sd(src5, MemOperand(sp, 0 * kPointerSize));
  }
812
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000813 void Push(Register src, Condition cond, Register tst1, Register tst2) {
814 // Since we don't have conditional execution we use a Branch.
815 Branch(3, cond, tst1, Operand(tst2));
816 Dsubu(sp, sp, Operand(kPointerSize));
817 sd(src, MemOperand(sp, 0));
818 }
819
820 void PushRegisterAsTwoSmis(Register src, Register scratch = at);
821 void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
822
  // Pops multiple values from the stack and load them in the
  // registers specified in regs. Pop order is the opposite as in MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  // FPU-register variants of the two functions above.
  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);

  // Pop a single register: load, then increment sp by one slot.
  void pop(Register dst) {
    ld(dst, MemOperand(sp, 0));
    Daddu(sp, sp, Operand(kPointerSize));
  }
  void Pop(Register dst) { pop(dst); }
836
  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(!src1.is(src2));  // aliased registers would lose one popped value
    ld(src2, MemOperand(sp, 0 * kPointerSize));
    ld(src1, MemOperand(sp, 1 * kPointerSize));
    Daddu(sp, sp, 2 * kPointerSize);
  }
844
845 // Pop three registers. Pops rightmost register first (from lower address).
846 void Pop(Register src1, Register src2, Register src3) {
847 ld(src3, MemOperand(sp, 0 * kPointerSize));
848 ld(src2, MemOperand(sp, 1 * kPointerSize));
849 ld(src1, MemOperand(sp, 2 * kPointerSize));
850 Daddu(sp, sp, 3 * kPointerSize);
851 }
852
  // Drop 'count' slots from the stack without loading them.
  void Pop(uint32_t count = 1) {
    Daddu(sp, sp, Operand(count * kPointerSize));
  }
856
Ben Murdochda12d292016-06-02 14:46:10 +0100857 // Push a fixed frame, consisting of ra, fp.
858 void PushCommonFrame(Register marker_reg = no_reg);
859
860 // Push a standard frame, consisting of ra, fp, context and JS function.
861 void PushStandardFrame(Register function_reg);
862
863 void PopCommonFrame(Register marker_reg = no_reg);
864
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000865 // Push and pop the registers that can hold pointers, as defined by the
866 // RegList constant kSafepointSavedRegisters.
867 void PushSafepointRegisters();
868 void PopSafepointRegisters();
869 // Store value in register src in the safepoint stack slot for
870 // register dst.
871 void StoreToSafepointRegisterSlot(Register src, Register dst);
872 // Load the value of the src register from its safepoint stack slot
873 // into register dst.
874 void LoadFromSafepointRegisterSlot(Register dst, Register src);
875
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000876 // MIPS64 R2 instruction macro.
877 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000878 void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000879 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400880 void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000881 void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
882 void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000883
  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs);
  void Cvt_d_uw(FPURegister fd, Register rs);

  // Convert unsigned long to double.
  void Cvt_d_ul(FPURegister fd, FPURegister fs);
  void Cvt_d_ul(FPURegister fd, Register rs);

  // Convert unsigned word to float.
  void Cvt_s_uw(FPURegister fd, FPURegister fs);
  void Cvt_s_uw(FPURegister fd, Register rs);

  // Convert unsigned long to float.
  void Cvt_s_ul(FPURegister fd, FPURegister fs);
  void Cvt_s_ul(FPURegister fd, Register rs);

  // Convert double to unsigned long.
  void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);

  // Double -> 64-bit integer with the indicated rounding mode.
  void Trunc_l_d(FPURegister fd, FPURegister fs);
  void Round_l_d(FPURegister fd, FPURegister fs);
  void Floor_l_d(FPURegister fd, FPURegister fs);
  void Ceil_l_d(FPURegister fd, FPURegister fs);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);

  // Convert single to unsigned word.
  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned long. If 'result' is given it receives an
  // indication of the conversion outcome -- confirm semantics in the .cc file.
  void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
                  Register result = no_reg);
  void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
                  Register result = no_reg);

  // Convert single to unsigned long.
  void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
                  Register result = no_reg);
  void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
                  Register result = no_reg);

  // Double -> 32-bit integer with the indicated rounding mode.
  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);
935
Ben Murdoch61f157c2016-09-16 13:49:30 +0100936 // Preserve value of a NaN operand
937 void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
938 FPURegister ft);
939 void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
940 FPURegister ft);
941
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000942 void Madd_d(FPURegister fd,
943 FPURegister fr,
944 FPURegister fs,
945 FPURegister ft,
946 FPURegister scratch);
947
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000948 // Wrapper functions for the different cmp/branch types.
949 inline void BranchF32(Label* target, Label* nan, Condition cc,
950 FPURegister cmp1, FPURegister cmp2,
951 BranchDelaySlot bd = PROTECT) {
952 BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
953 }
954
955 inline void BranchF64(Label* target, Label* nan, Condition cc,
956 FPURegister cmp1, FPURegister cmp2,
957 BranchDelaySlot bd = PROTECT) {
958 BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
959 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000960
961 // Alternate (inline) version for better readability with USE_DELAY_SLOT.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000962 inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
963 Condition cc, FPURegister cmp1, FPURegister cmp2) {
964 BranchF64(target, nan, cc, cmp1, cmp2, bd);
965 }
966
967 inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
968 Condition cc, FPURegister cmp1, FPURegister cmp2) {
969 BranchF32(target, nan, cc, cmp1, cmp2, bd);
970 }
971
972 // Alias functions for backward compatibility.
973 inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
974 FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
975 BranchF64(target, nan, cc, cmp1, cmp2, bd);
976 }
977
978 inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
979 Condition cc, FPURegister cmp1, FPURegister cmp2) {
980 BranchF64(bd, target, nan, cc, cmp1, cmp2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000981 }
982
  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       Register result,
                       DoubleRegister double_input,
                       Register scratch,
                       DoubleRegister double_scratch,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch,
                         Label* not_int32);
1026
  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  FPURegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DoubleRegister double_dst,
                               Register heap_number_map,
                               Register scratch1,
                               Register scratch2,
                               FPURegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         FPURegister double_scratch0,
                         FPURegister double_scratch1,
                         Label* not_int32);
1062
  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles,
                      int stack_space = 0);

  // Leave the current exit frame. Drops arg_count arguments (or treats it
  // as a byte length when argument_count_is_length is true).
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool restore_context, bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();
1080
  // Walk context_chain_length links up the context chain into dst.
  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }
1092
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001093 // Conditionally load the cached Array transitioned map of type
1094 // transitioned_kind from the native context if the map in register
1095 // map_in_out is the cached Array map in the native context of
1096 // expected_kind.
1097 void LoadTransitionedArrayMapConditional(
1098 ElementsKind expected_kind,
1099 ElementsKind transitioned_kind,
1100 Register map_in_out,
1101 Register scratch,
1102 Label* no_map_match);
1103
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001104 void LoadNativeContextSlot(int index, Register dst);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001105
1106 // Load the initial map from the global function. The registers
1107 // function and map can be the same, function is then overwritten.
1108 void LoadGlobalFunctionInitialMap(Register function,
1109 Register map,
1110 Register scratch);
1111
1112 void InitializeRootRegister() {
1113 ExternalReference roots_array_start =
1114 ExternalReference::roots_array_start(isolate());
1115 li(kRootRegister, Operand(roots_array_start));
1116 }
1117
  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  // Notify the debugger before invoking if the function is being stepped.
  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001139
  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      Register new_target,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // As above, with an explicit expected parameter count.
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // As above, for a function known at compile time.
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);


  // Jump to 'fail' if the object is not a JS string; clobbers scratch.
  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  // Jump to 'fail' if the object is not a name; clobbers scratch.
  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);
1168
  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();

  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|. At
  // the end the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001197
  // -------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Load the object's map into 'map' and its instance type into 'type_reg'.
  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  // Load the instance type out of an already-loaded map.
  void GetInstanceType(Register object_map, Register object_instance_type) {
    lbu(object_instance_type,
        FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  }
1222
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001223 // Check if a map for a JSObject indicates that the object has fast elements.
1224 // Jump to the specified label if it does not.
1225 void CheckFastElements(Register map,
1226 Register scratch,
1227 Label* fail);
1228
1229 // Check if a map for a JSObject indicates that the object can have both smi
1230 // and HeapObject elements. Jump to the specified label if it does not.
1231 void CheckFastObjectElements(Register map,
1232 Register scratch,
1233 Label* fail);
1234
1235 // Check if a map for a JSObject indicates that the object has fast smi only
1236 // elements. Jump to the specified label if it does not.
1237 void CheckFastSmiElements(Register map,
1238 Register scratch,
1239 Label* fail);
1240
1241 // Check to see if maybe_number can be stored as a double in
1242 // FastDoubleElements. If it can, store it at the index specified by key in
1243 // the FastDoubleElements array elements. Otherwise jump to fail.
1244 void StoreNumberToDoubleElements(Register value_reg,
1245 Register key_reg,
1246 Register elements_reg,
1247 Register scratch1,
1248 Register scratch2,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001249 Label* fail,
1250 int elements_offset = 0);
1251
  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequences branches to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMapAndBranch(Register obj_map,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specificed map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // As above, comparing against a root-list map instead of a handle.
  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object)
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001294
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001295 // If the value is a NaN, canonicalize the value else, do nothing.
1296 void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
1297
1298
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001299 // Get value of the weak cell.
1300 void GetWeakValue(Register value, Handle<WeakCell> cell);
1301
1302 // Load the value of the weak cell in the value register. Branch to the
1303 // given miss label is the weak cell was cleared.
1304 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001305
  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  // NOTE(review): 'result' is not used in this body -- presumably kept for
  // signature parity with other ports; confirm before removing.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    // Strings have a zero tag, so 'type == 0' <=> string.
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }
1318
1319
1320 // Picks out an array index from the hash field.
1321 // Register use:
1322 // hash - holds the index's hash. Clobbered.
1323 // index - holds the overwritten index on exit.
1324 void IndexFromHash(Register hash, Register index);
1325
1326 // Get the number of least significant bits from a register.
1327 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1328 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
1329
  // Load the value of a number object into a FPU double register. If the
  // object is not a number a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);
1348
  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of the
  // jump functions with the overflow_dst register as the second parameter.

  // 32-bit (word) add with overflow branch; branches to overflow_label on
  // signed overflow.
  inline void AddBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  // As above, branching to no_overflow_label when no overflow occurs.
  inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void AddBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void AddBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  // 32-bit (word) subtract with overflow branch.
  inline void SubBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void SubBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void SubBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  // 64-bit (doubleword) add with overflow branch.
  inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
                            Label* overflow_label, Register scratch = at) {
    DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
                              Label* no_overflow_label, Register scratch = at) {
    DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void DaddBranchOvf(Register dst, Register left, const Operand& right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  void DaddBranchOvf(Register dst, Register left, Register right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  // 64-bit (doubleword) subtract with overflow branch.
  inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
                            Label* overflow_label, Register scratch = at) {
    DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
                              Label* no_overflow_label, Register scratch = at) {
    DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void DsubBranchOvf(Register dst, Register left, const Operand& right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  void DsubBranchOvf(Register dst, Register left, Register right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  // overflow_check < 0 signals overflow (see the lt/ge conditions below).
  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
1445
  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }

// Default conditional-call arguments shared by the stub-call declarations
// below; #undef'd immediately after use.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);
1470
  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT);
  // Convenience wrapper that always saves FPU registers.
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles, bd);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles, bd);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Number of argument words that must be passed on the stack (beyond the
  // register-passed ones) for the given C call.
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
1505
  // Before calling a C-function from generated code, align arguments on stack
  // and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored to stack using following:
  //   sw(a4, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
  // Move a double out of the C return / parameter registers.
  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);
1548
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001549 // Jump to the builtin routine.
1550 void JumpToExternalReference(const ExternalReference& builtin,
1551 BranchDelaySlot bd = PROTECT);
1552
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001553 struct Unresolved {
1554 int pc;
1555 uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
1556 const char* name;
1557 };
1558
1559 Handle<Object> CodeObject() {
1560 DCHECK(!code_object_.is_null());
1561 return code_object_;
1562 }
1563
1564 // Emit code for a truncating division by a constant. The dividend register is
1565 // unchanged and at gets clobbered. Dividend and result must be different.
1566 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1567
  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);

  // -------------------------------------------------------------------------
  // Smi utilities.

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1617
  // Tag src as a Smi into dst (dst and src may be the same register).
  // With 32-bit Smis the payload lives in the upper 32 bits of the word;
  // with 31-bit Smis tagging is a left shift by one (emitted as src + src).
  void SmiTag(Register dst, Register src) {
    STATIC_ASSERT(kSmiTag == 0);
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      dsll32(dst, src, 0);
    } else {
      Addu(dst, src, src);
    }
  }

  void SmiTag(Register reg) {
    SmiTag(reg, reg);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }

  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    if (SmiValuesAre32Bits()) {
      // Every int32 fits in a 32-bit Smi; tagging cannot overflow.
      SmiTag(dst, src);
    } else {
      // 31-bit Smis: tag into at first so src is preserved if we overflow.
      SmiTagCheckOverflow(at, src, scratch);
      BranchOnOverflow(not_a_smi, scratch);
      mov(dst, at);
    }
  }

  // Untag the Smi in src and store the int32 result in dst.
  void SmiUntag(Register dst, Register src) {
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      dsra32(dst, src, 0);
    } else {
      sra(dst, src, kSmiTagSize);
    }
  }

  void SmiUntag(Register reg) {
    SmiUntag(reg, reg);
  }

  // Left-shifted from int32 equivalent of Smi.
  void SmiScale(Register dst, Register src, int scale) {
    if (SmiValuesAre32Bits()) {
      // The int portion is upper 32-bits of 64-bit word.
      dsra(dst, src, kSmiShift - scale);
    } else {
      DCHECK(scale >= kSmiTagSize);
      sll(dst, src, scale - kSmiTagSize);
    }
  }
1675
  // Combine load with untagging or scaling.
  void SmiLoadUntag(Register dst, MemOperand src);

  void SmiLoadScale(Register dst, MemOperand src, int scale);

  // Returns 2 values: the Smi and a scaled version of the int within the Smi.
  void SmiLoadWithScale(Register d_smi,
                        Register d_scaled,
                        MemOperand src,
                        int scale);

  // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
  void SmiLoadUntagWithScale(Register d_int,
                             Register d_scaled,
                             MemOperand src,
                             int scale);


  // Test if the register contains a smi: scratch is zero iff value is a smi
  // (kSmiTag is zero, see STATIC_ASSERT in SmiTag above).
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  // Like SmiTst, but additionally tests the sign bit, so scratch is zero
  // only for a non-negative smi.
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);
1761
  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  // Debug-mode check that (string, index, value) is a valid sequential-string
  // character store for the given encoding mask.
  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Saturating clamps to uint8 range; see definitions for exact semantics.
  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  // Map/descriptor accessors.
  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001819
  // Extract the bit field described by Field (kShift/kSize) from src and
  // store the untagged result in dst.
  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  // Extract the bit field described by Field from src and store it in dst
  // as a Smi. Note: unconditionally uses the 32-bit-Smi layout (dsll32),
  // unlike SmiTag above which dispatches on SmiValuesAre32Bits().
  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift;
    dsrl(dst, src, shift);
    And(dst, dst, Operand(mask));
    dsll32(dst, dst, 0);
  }
1838
1839 template<typename Field>
1840 void DecodeFieldToSmi(Register reg) {
1841 DecodeField<Field>(reg, reg);
1842 }
  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue(bool code_pre_aging);

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated AllocationMemento
  // object that can be checked for in order to pretransition to another type.
  // On entry, receiver_reg should point to the array object. scratch_reg gets
  // clobbered. If no info is present jump to no_memento_found, otherwise fall
  // through.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  // Inverse of the above: jumps to memento_found when a memento is present
  // and falls through otherwise. scratch_reg gets clobbered.
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    Branch(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
1883
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001884 private:
1885 void CallCFunctionHelper(Register function,
1886 int num_reg_arguments,
1887 int num_double_arguments);
1888
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001889 inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
1890 inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
1891 void BranchShortHelperR6(int32_t offset, Label* L);
1892 void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
1893 bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
1894 Register rs, const Operand& rt);
1895 bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
1896 const Operand& rt, BranchDelaySlot bdslot);
1897 bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
1898 const Operand& rt, BranchDelaySlot bdslot);
1899
1900 void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
1901 void BranchAndLinkShortHelper(int16_t offset, Label* L,
1902 BranchDelaySlot bdslot);
1903 void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001904 void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001905 bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
1906 Register rs, const Operand& rt);
1907 bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
1908 Register rs, const Operand& rt,
1909 BranchDelaySlot bdslot);
1910 bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
1911 Register rs, const Operand& rt,
1912 BranchDelaySlot bdslot);
1913 void BranchLong(Label* L, BranchDelaySlot bdslot);
1914 void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001916 // Common implementation of BranchF functions for the different formats.
1917 void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
1918 Condition cc, FPURegister cmp1, FPURegister cmp2,
1919 BranchDelaySlot bd = PROTECT);
1920
1921 void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
1922 FPURegister cmp1, FPURegister cmp2,
1923 BranchDelaySlot bd = PROTECT);
1924
1925
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001926 // Helper functions for generating invokes.
1927 void InvokePrologue(const ParameterCount& expected,
1928 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001929 Label* done,
1930 bool* definitely_mismatches,
1931 InvokeFlag flag,
1932 const CallWrapper& call_wrapper);
1933
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001934 void InitializeNewString(Register string,
1935 Register length,
1936 Heap::RootListIndex map_index,
1937 Register scratch1,
1938 Register scratch2);
1939
1940 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001941 void InNewSpace(Register object, Register scratch,
1942 Condition cond, // ne for new space, eq otherwise.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001943 Label* branch);
1944
1945 // Helper for finding the mark bits for an address. Afterwards, the
1946 // bitmap register points at the word with the mark bits and the mask
1947 // the position of the first bit. Leaves addr_reg unchanged.
1948 inline void GetMarkBits(Register addr_reg,
1949 Register bitmap_reg,
1950 Register mask_reg);
1951
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001952 // Compute memory operands for safepoint stack slots.
1953 static int SafepointRegisterStackIndex(int reg_code);
1954 MemOperand SafepointRegisterSlot(Register reg);
1955 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1956
1957 bool generating_stub_;
1958 bool has_frame_;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001959 bool has_double_zero_reg_set_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001960 // This handle will be patched with the code object on installation.
1961 Handle<Object> code_object_;
1962
1963 // Needs access to SafepointRegisterStackIndex for compiled frame
1964 // traversal.
1965 friend class StandardFrame;
1966};
1967
1968
1969// The code patcher is used to patch (typically) small parts of code e.g. for
1970// debugging and other types of instrumentation. When using the code patcher
1971// the exact number of bytes specified must be emitted. It is not legal to emit
1972// relocation information. If any of these constraints are violated it causes
1973// an assertion to fail.
class CodePatcher {
 public:
  // Whether the instruction cache is flushed when patching finishes.
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
2004
Ben Murdoch097c5b22016-05-18 11:27:45 +01002005template <typename Func>
2006void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
2007 Func GetLabelFunction) {
2008 // Ensure that dd-ed labels following this instruction use 8 bytes aligned
2009 // addresses.
2010 if (kArchVariant >= kMips64r6) {
2011 BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
2012 // Opposite of Align(8) as we have odd number of instructions in this case.
2013 if ((pc_offset() & 7) == 0) {
2014 nop();
2015 }
2016 addiupc(at, 5);
Ben Murdochda12d292016-06-02 14:46:10 +01002017 Dlsa(at, at, index, kPointerSizeLog2);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002018 ld(at, MemOperand(at));
2019 } else {
2020 Label here;
Ben Murdochda12d292016-06-02 14:46:10 +01002021 BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002022 Align(8);
Ben Murdochda12d292016-06-02 14:46:10 +01002023 push(ra);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002024 bal(&here);
2025 dsll(at, index, kPointerSizeLog2); // Branch delay slot.
2026 bind(&here);
2027 daddu(at, at, ra);
Ben Murdochda12d292016-06-02 14:46:10 +01002028 pop(ra);
2029 ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
Ben Murdoch097c5b22016-05-18 11:27:45 +01002030 }
2031 jr(at);
2032 nop(); // Branch delay slot nop.
2033 for (size_t index = 0; index < case_count; ++index) {
2034 dd(GetLabelFunction(index));
2035 }
2036}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002037
2038#ifdef GENERATED_CODE_COVERAGE
2039#define CODE_COVERAGE_STRINGIFY(x) #x
2040#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
2041#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
2042#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
2043#else
2044#define ACCESS_MASM(masm) masm->
2045#endif
2046
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002047} // namespace internal
2048} // namespace v8
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002049
2050#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_