1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
33// The original source code covered by the above license has been
34// modified significantly by Google Inc.
35// Copyright 2014 the V8 project authors. All rights reserved.
36
37#include "src/ppc/assembler-ppc.h"
38
39#if V8_TARGET_ARCH_PPC
40
41#include "src/base/bits.h"
42#include "src/base/cpu.h"
43#include "src/macro-assembler.h"
44#include "src/ppc/assembler-ppc-inl.h"
45
46namespace v8 {
47namespace internal {
48
49// Get the CPU features enabled by the build.
50static unsigned CpuFeaturesImpliedByCompiler() {
51 unsigned answer = 0;
52 return answer;
53}
54
55
56void CpuFeatures::ProbeImpl(bool cross_compile) {
57 supported_ |= CpuFeaturesImpliedByCompiler();
58 cache_line_size_ = 128;
59
60 // Only use statically determined features for cross compile (snapshot).
61 if (cross_compile) return;
62
63// Detect whether frim instruction is supported (POWER5+)
64// For now we will just check for processors we know do not
65// support it
66#ifndef USE_SIMULATOR
67 // Probe for additional features at runtime.
68 base::CPU cpu;
69#if V8_TARGET_ARCH_PPC64
70 if (cpu.part() == base::CPU::PPC_POWER8) {
71 supported_ |= (1u << FPR_GPR_MOV);
72 }
73#endif
74 if (cpu.part() == base::CPU::PPC_POWER6 ||
75 cpu.part() == base::CPU::PPC_POWER7 ||
76 cpu.part() == base::CPU::PPC_POWER8) {
77 supported_ |= (1u << LWSYNC);
78 }
79 if (cpu.part() == base::CPU::PPC_POWER7 ||
80 cpu.part() == base::CPU::PPC_POWER8) {
81 supported_ |= (1u << ISELECT);
82 }
83#if V8_OS_LINUX
84 if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
85 // Assume support
86 supported_ |= (1u << FPU);
87 }
88#elif V8_OS_AIX
89 // Assume FP support and default cache line size
90 supported_ |= (1u << FPU);
91#endif
92#else // Simulator
93 supported_ |= (1u << FPU);
94 supported_ |= (1u << LWSYNC);
95 supported_ |= (1u << ISELECT);
96#if V8_TARGET_ARCH_PPC64
97 supported_ |= (1u << FPR_GPR_MOV);
98#endif
99#endif
100}
101
102
103void CpuFeatures::PrintTarget() {
104 const char* ppc_arch = NULL;
105
106#if V8_TARGET_ARCH_PPC64
107 ppc_arch = "ppc64";
108#else
109 ppc_arch = "ppc";
110#endif
111
112 printf("target %s\n", ppc_arch);
113}
114
115
116void CpuFeatures::PrintFeatures() {
117 printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
118}
119
120
121Register ToRegister(int num) {
122 DCHECK(num >= 0 && num < kNumRegisters);
123 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
124 r8, r9, r10, r11, ip, r13, r14, r15,
125 r16, r17, r18, r19, r20, r21, r22, r23,
126 r24, r25, r26, r27, r28, r29, r30, fp};
127 return kRegisters[num];
128}
129
130
131// -----------------------------------------------------------------------------
132// Implementation of RelocInfo
133
134const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
135 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
136
137
138bool RelocInfo::IsCodedSpecially() {
139 // The deserializer needs to know whether a pointer is specially
140 // coded. Being specially coded on PPC means that it is a lis/ori
141 // instruction sequence or is a constant pool entry, and these are
142 // always the case inside code objects.
143 return true;
144}
145
146
147bool RelocInfo::IsInConstantPool() {
148 if (FLAG_enable_embedded_constant_pool) {
149 Address constant_pool = host_->constant_pool();
150 return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
151 }
152 return false;
153}
154
155
156// -----------------------------------------------------------------------------
157// Implementation of Operand and MemOperand
158// See assembler-ppc-inl.h for inlined constructors
159
160Operand::Operand(Handle<Object> handle) {
161 AllowDeferredHandleDereference using_raw_address;
162 rm_ = no_reg;
163 // Verify all Objects referred to by code are NOT in new space.
164 Object* obj = *handle;
165 if (obj->IsHeapObject()) {
166 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
167 imm_ = reinterpret_cast<intptr_t>(handle.location());
168 rmode_ = RelocInfo::EMBEDDED_OBJECT;
169 } else {
170 // no relocation needed
171 imm_ = reinterpret_cast<intptr_t>(obj);
172 rmode_ = kRelocInfo_NONEPTR;
173 }
174}
175
176
177MemOperand::MemOperand(Register rn, int32_t offset) {
178 ra_ = rn;
179 rb_ = no_reg;
180 offset_ = offset;
181}
182
183
184MemOperand::MemOperand(Register ra, Register rb) {
185 ra_ = ra;
186 rb_ = rb;
187 offset_ = 0;
188}
189
190
191// -----------------------------------------------------------------------------
192// Specific instructions, constants, and masks.
193
194
195Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
196 : AssemblerBase(isolate, buffer, buffer_size),
197 recorded_ast_id_(TypeFeedbackId::None()),
198 constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
199 positions_recorder_(this) {
200 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
201
202 no_trampoline_pool_before_ = 0;
203 trampoline_pool_blocked_nesting_ = 0;
204 constant_pool_entry_sharing_blocked_nesting_ = 0;
205 next_trampoline_check_ = kMaxInt;
206 internal_trampoline_exception_ = false;
207 last_bound_pos_ = 0;
208 optimizable_cmpi_pos_ = -1;
209 trampoline_emitted_ = FLAG_force_long_branches;
210 tracked_branch_count_ = 0;
211 ClearRecordedAstId();
212 relocations_.reserve(128);
213}
214
215
216void Assembler::GetCode(CodeDesc* desc) {
217 // Emit constant pool if necessary.
218 int constant_pool_offset = EmitConstantPool();
219
220 EmitRelocations();
221
222 // Set up code descriptor.
223 desc->buffer = buffer_;
224 desc->buffer_size = buffer_size_;
225 desc->instr_size = pc_offset();
226 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
227 desc->constant_pool_size =
228 (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
229 desc->origin = this;
230}
231
232
233void Assembler::Align(int m) {
234 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
235 DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
236 while ((pc_offset() & (m - 1)) != 0) {
237 nop();
238 }
239}
240
241
242void Assembler::CodeTargetAlign() { Align(8); }
243
244
245Condition Assembler::GetCondition(Instr instr) {
246 switch (instr & kCondMask) {
247 case BT:
248 return eq;
249 case BF:
250 return ne;
251 default:
252 UNIMPLEMENTED();
253 }
254 return al;
255}
256
257
258bool Assembler::IsLis(Instr instr) {
259 return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
260}
261
262
263bool Assembler::IsLi(Instr instr) {
264 return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
265}
266
267
268bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
269
270
271bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
272
273
274bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
275
276
277Register Assembler::GetRA(Instr instr) {
278 Register reg;
279 reg.reg_code = Instruction::RAValue(instr);
280 return reg;
281}
282
283
284Register Assembler::GetRB(Instr instr) {
285 Register reg;
286 reg.reg_code = Instruction::RBValue(instr);
287 return reg;
288}
289
290
291#if V8_TARGET_ARCH_PPC64
292// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
293bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
294 Instr instr4, Instr instr5) {
295 // Check the instructions are indeed a five part load (into r12)
296 // 3d800000 lis r12, 0
297 // 618c0000 ori r12, r12, 0
298 // 798c07c6 rldicr r12, r12, 32, 31
299 // 658c00c3 oris r12, r12, 195
300 // 618ccd40 ori r12, r12, 52544
301 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
302 (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
303 ((instr5 >> 16) == 0x618c));
304}
305#else
306// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
307bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
308 // Check the instruction is indeed a two part load (into r12)
309 // 3d802553 lis r12, 9555
310 // 618c5000 ori r12, r12, 20480
311 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
312}
313#endif
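// A sketch of the fixed sequence these predicates match (64-bit case),
// assuming the usual lis/ori encodings: 0x3d80xxxx is "lis r12, imm" and
// 0x618cxxxx is "ori r12, r12, imm", so only the opcode/register halfword
// is compared and the immediate halves are free to vary per call site.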
314
315
316bool Assembler::IsCmpRegister(Instr instr) {
317 return (((instr & kOpcodeMask) == EXT2) &&
318 ((instr & kExt2OpcodeMask) == CMP));
319}
320
321
322bool Assembler::IsRlwinm(Instr instr) {
323 return ((instr & kOpcodeMask) == RLWINMX);
324}
325
326
327bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
328
329
330#if V8_TARGET_ARCH_PPC64
331bool Assembler::IsRldicl(Instr instr) {
332 return (((instr & kOpcodeMask) == EXT5) &&
333 ((instr & kExt5OpcodeMask) == RLDICL));
334}
335#endif
336
337
338bool Assembler::IsCmpImmediate(Instr instr) {
339 return ((instr & kOpcodeMask) == CMPI);
340}
341
342
343bool Assembler::IsCrSet(Instr instr) {
344 return (((instr & kOpcodeMask) == EXT1) &&
345 ((instr & kExt1OpcodeMask) == CREQV));
346}
347
348
349Register Assembler::GetCmpImmediateRegister(Instr instr) {
350 DCHECK(IsCmpImmediate(instr));
351 return GetRA(instr);
352}
353
354
355int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
356 DCHECK(IsCmpImmediate(instr));
357 return instr & kOff16Mask;
358}
359
360
361// Labels refer to positions in the (to be) generated code.
362// There are bound, linked, and unused labels.
363//
364// Bound labels refer to known positions in the already
365// generated code. pos() is the position the label refers to.
366//
367// Linked labels refer to unknown positions in the code
368// to be generated; pos() is the position of the last
369// instruction using the label.
370
371
372// The link chain is terminated by a negative code position (must be aligned)
373const int kEndOfChain = -4;
374
375
376// Dummy opcodes for unbound label mov instructions or jump table entries.
377enum {
378 kUnboundMovLabelOffsetOpcode = 0 << 26,
379 kUnboundAddLabelOffsetOpcode = 1 << 26,
380 kUnboundMovLabelAddrOpcode = 2 << 26,
381 kUnboundJumpTableEntryOpcode = 3 << 26
382};
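// These dummy values occupy the primary opcode field (the "<< 26" position),
// just like BX/BCX, so target_at() below can pull a signed 26-bit link out of
// any unbound-label marker with the same kImm26Mask. The encoded link is a
// word offset, hence the "link <<= 2" when it is read back.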
383
384
385int Assembler::target_at(int pos) {
386 Instr instr = instr_at(pos);
387 // Check which type of branch this is: a 16- or 26-bit offset.
388 int opcode = instr & kOpcodeMask;
389 int link;
390 switch (opcode) {
391 case BX:
392 link = SIGN_EXT_IMM26(instr & kImm26Mask);
393 link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
394 break;
395 case BCX:
396 link = SIGN_EXT_IMM16((instr & kImm16Mask));
397 link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
398 break;
399 case kUnboundMovLabelOffsetOpcode:
400 case kUnboundAddLabelOffsetOpcode:
401 case kUnboundMovLabelAddrOpcode:
402 case kUnboundJumpTableEntryOpcode:
403 link = SIGN_EXT_IMM26(instr & kImm26Mask);
404 link <<= 2;
405 break;
406 default:
407 DCHECK(false);
408 return -1;
409 }
410
411 if (link == 0) return kEndOfChain;
412 return pos + link;
413}
414
415
416void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
417 Instr instr = instr_at(pos);
418 int opcode = instr & kOpcodeMask;
419
420 if (is_branch != nullptr) {
421 *is_branch = (opcode == BX || opcode == BCX);
422 }
423
424 switch (opcode) {
425 case BX: {
426 int imm26 = target_pos - pos;
427 CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
428 if (imm26 == kInstrSize && !(instr & kLKMask)) {
429 // Branch to next instr without link.
430 instr = ORI; // nop: ori, 0,0,0
431 } else {
432 instr &= ((~kImm26Mask) | kAAMask | kLKMask);
433 instr |= (imm26 & kImm26Mask);
434 }
435 instr_at_put(pos, instr);
436 break;
437 }
438 case BCX: {
439 int imm16 = target_pos - pos;
440 CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
441 if (imm16 == kInstrSize && !(instr & kLKMask)) {
442 // Branch to next instr without link.
443 instr = ORI; // nop: ori, 0,0,0
444 } else {
445 instr &= ((~kImm16Mask) | kAAMask | kLKMask);
446 instr |= (imm16 & kImm16Mask);
447 }
448 instr_at_put(pos, instr);
449 break;
450 }
451 case kUnboundMovLabelOffsetOpcode: {
452 // Load the position of the label relative to the generated code object
453 // pointer in a register.
454 Register dst = Register::from_code(instr_at(pos + kInstrSize));
455 int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
456 CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
457 CodePatcher::DONT_FLUSH);
458 patcher.masm()->bitwise_mov32(dst, offset);
459 break;
460 }
461 case kUnboundAddLabelOffsetOpcode: {
462 // dst = base + position + immediate
463 Instr operands = instr_at(pos + kInstrSize);
464 Register dst = Register::from_code((operands >> 21) & 0x1f);
465 Register base = Register::from_code((operands >> 16) & 0x1f);
466 int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
467 CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
468 CodePatcher::DONT_FLUSH);
469 patcher.masm()->bitwise_add32(dst, base, offset);
470 break;
471 }
472 case kUnboundMovLabelAddrOpcode: {
473 // Load the address of the label in a register.
474 Register dst = Register::from_code(instr_at(pos + kInstrSize));
475 CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
476 kMovInstructionsNoConstantPool,
477 CodePatcher::DONT_FLUSH);
478 // Keep internal references relative until EmitRelocations.
479 patcher.masm()->bitwise_mov(dst, target_pos);
480 break;
481 }
482 case kUnboundJumpTableEntryOpcode: {
483 CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
484 kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
485 // Keep internal references relative until EmitRelocations.
486 patcher.masm()->dp(target_pos);
487 break;
488 }
489 default:
490 DCHECK(false);
491 break;
492 }
493}
494
495
496int Assembler::max_reach_from(int pos) {
497 Instr instr = instr_at(pos);
498 int opcode = instr & kOpcodeMask;
499
500 // Check which type of branch this is: a 16- or 26-bit offset.
501 switch (opcode) {
502 case BX:
503 return 26;
504 case BCX:
505 return 16;
506 case kUnboundMovLabelOffsetOpcode:
507 case kUnboundAddLabelOffsetOpcode:
508 case kUnboundMovLabelAddrOpcode:
509 case kUnboundJumpTableEntryOpcode:
510 return 0; // no limit on reach
511 }
512
513 DCHECK(false);
514 return 0;
515}
516
517
518void Assembler::bind_to(Label* L, int pos) {
519 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
520 int32_t trampoline_pos = kInvalidSlotPos;
521 bool is_branch = false;
522 while (L->is_linked()) {
523 int fixup_pos = L->pos();
524 int32_t offset = pos - fixup_pos;
525 int maxReach = max_reach_from(fixup_pos);
526 next(L); // call next before overwriting link with target at fixup_pos
527 if (maxReach && is_intn(offset, maxReach) == false) {
528 if (trampoline_pos == kInvalidSlotPos) {
529 trampoline_pos = get_trampoline_entry();
530 CHECK(trampoline_pos != kInvalidSlotPos);
531 target_at_put(trampoline_pos, pos);
532 }
533 target_at_put(fixup_pos, trampoline_pos);
534 } else {
535 target_at_put(fixup_pos, pos, &is_branch);
536 }
537 }
538 L->bind_to(pos);
539
540 if (!trampoline_emitted_ && is_branch) {
541 UntrackBranch();
542 }
543
544 // Keep track of the last bound label so we don't eliminate any instructions
545 // before a bound label.
546 if (pos > last_bound_pos_) last_bound_pos_ = pos;
547}
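// When a conditional branch (16-bit reach, roughly +/-32KB) cannot reach the
// label being bound, bind_to() above redirects it to a trampoline slot and
// points that slot at the real target; the slot itself is expected to be
// filled with an unconditional branch by the trampoline pool machinery, so
// the fixup still lands on the intended position.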
548
549
550void Assembler::bind(Label* L) {
551 DCHECK(!L->is_bound()); // label can only be bound once
552 bind_to(L, pc_offset());
553}
554
555
556void Assembler::next(Label* L) {
557 DCHECK(L->is_linked());
558 int link = target_at(L->pos());
559 if (link == kEndOfChain) {
560 L->Unuse();
561 } else {
562 DCHECK(link >= 0);
563 L->link_to(link);
564 }
565}
566
567
568bool Assembler::is_near(Label* L, Condition cond) {
569 DCHECK(L->is_bound());
570 if (L->is_bound() == false) return false;
571
572 int maxReach = ((cond == al) ? 26 : 16);
573 int offset = L->pos() - pc_offset();
574
575 return is_intn(offset, maxReach);
576}
577
578
579void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
580 DoubleRegister frb, RCBit r) {
581 emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
582}
583
584
585void Assembler::d_form(Instr instr, Register rt, Register ra,
586 const intptr_t val, bool signed_disp) {
587 if (signed_disp) {
588 if (!is_int16(val)) {
589 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
590 }
591 CHECK(is_int16(val));
592 } else {
593 if (!is_uint16(val)) {
594 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
595 ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
596 val, val, is_uint16(val), kImm16Mask);
597 }
598 CHECK(is_uint16(val));
599 }
600 emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
601}
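// Layout produced by d_form(), in terms of the shifts used above:
// | opcode (instr) | RT/RS at B21 | RA at B16 | 16-bit displacement |
// The signed_disp flag only selects which range CHECK is applied; the value
// is masked with kImm16Mask either way before being or'ed into the word.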
602
603
604void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
605 RCBit r) {
606 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
607}
608
609
610void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
611 OEBit o, RCBit r) {
612 emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
613}
614
615
616void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
617 int maskbit, RCBit r) {
618 int sh0_4 = shift & 0x1f;
619 int sh5 = (shift >> 5) & 0x1;
620 int m0_4 = maskbit & 0x1f;
621 int m5 = (maskbit >> 5) & 0x1;
622
623 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
624 m5 * B5 | sh5 * B1 | r);
625}
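// MD-form note: the 6-bit shift and mask-begin values used by the 64-bit
// rotates do not fit the usual 5-bit fields, so md_form() splits them --
// the low five bits go in the regular positions (B11/B6) and the sixth bit
// is placed separately (sh5 at B1, m5 at B5), matching the code above.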
626
627
628void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
629 int maskbit, RCBit r) {
630 int m0_4 = maskbit & 0x1f;
631 int m5 = (maskbit >> 5) & 0x1;
632
633 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
634 m5 * B5 | r);
635}
636
637
638// Returns the next free trampoline entry.
639int32_t Assembler::get_trampoline_entry() {
640 int32_t trampoline_entry = kInvalidSlotPos;
641
642 if (!internal_trampoline_exception_) {
643 trampoline_entry = trampoline_.take_slot();
644
645 if (kInvalidSlotPos == trampoline_entry) {
646 internal_trampoline_exception_ = true;
647 }
648 }
649 return trampoline_entry;
650}
651
652
653int Assembler::link(Label* L) {
654 int position;
655 if (L->is_bound()) {
656 position = L->pos();
657 } else {
658 if (L->is_linked()) {
659 position = L->pos(); // L's link
660 } else {
661 // was: target_pos = kEndOfChain;
662 // However, using self to mark the first reference
663 // should avoid most instances of branch offset overflow. See
664 // target_at() for where this is converted back to kEndOfChain.
665 position = pc_offset();
666 }
667 L->link_to(pc_offset());
668 }
669
670 return position;
671}
672
673
674// Branch instructions.
675
676
677void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
678 positions_recorder()->WriteRecordedPositions();
679 emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
680}
681
682
683void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
684 positions_recorder()->WriteRecordedPositions();
685 emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
686}
687
688
689// Pseudo op - branch to link register
690void Assembler::blr() { bclr(BA, 0, LeaveLK); }
691
692
693// Pseudo op - branch to count register -- used for "jump"
694void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }
695
696
697void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
698
699
700void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
701 if (lk == SetLK) {
702 positions_recorder()->WriteRecordedPositions();
703 }
704 int imm16 = branch_offset;
705 CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
706 emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
707}
708
709
710void Assembler::b(int branch_offset, LKBit lk) {
711 if (lk == SetLK) {
712 positions_recorder()->WriteRecordedPositions();
713 }
714 int imm26 = branch_offset;
715 CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
716 emit(BX | (imm26 & kImm26Mask) | lk);
717}
718
719
720void Assembler::xori(Register dst, Register src, const Operand& imm) {
721 d_form(XORI, src, dst, imm.imm_, false);
722}
723
724
725void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
726 d_form(XORIS, rs, ra, imm.imm_, false);
727}
728
729
730void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
731 x_form(EXT2 | XORX, dst, src1, src2, rc);
732}
733
734
735void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
736 x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
737}
738
739
740void Assembler::popcntw(Register ra, Register rs) {
741 emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
742}
743
744
745void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
746 x_form(EXT2 | ANDX, ra, rs, rb, rc);
747}
748
749
750void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
751 RCBit rc) {
752 sh &= 0x1f;
753 mb &= 0x1f;
754 me &= 0x1f;
755 emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
756 me << 1 | rc);
757}
758
759
760void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
761 RCBit rc) {
762 mb &= 0x1f;
763 me &= 0x1f;
764 emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
765 me << 1 | rc);
766}
767
768
769void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
770 RCBit rc) {
771 sh &= 0x1f;
772 mb &= 0x1f;
773 me &= 0x1f;
774 emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
775 me << 1 | rc);
776}
777
778
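// The shift/rotate helpers below are the standard PowerPC simplified
// mnemonics expressed as rlwinm/rlwnm patterns, e.g. (assuming the usual
// definitions) slwi rA,rS,n == rlwinm rA,rS,n,0,31-n and
// srwi rA,rS,n == rlwinm rA,rS,32-n,n,31, which is exactly what the
// DCHECK-guarded bodies emit.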
779void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
780 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
781 rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
782}
783
784
785void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
786 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
787 rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
788}
789
790
791void Assembler::clrrwi(Register dst, Register src, const Operand& val,
792 RCBit rc) {
793 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
794 rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
795}
796
797
798void Assembler::clrlwi(Register dst, Register src, const Operand& val,
799 RCBit rc) {
800 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
801 rlwinm(dst, src, 0, val.imm_, 31, rc);
802}
803
804
805void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
806 emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
807}
808
809
810void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
811 x_form(EXT2 | SRWX, dst, src1, src2, r);
812}
813
814
815void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
816 x_form(EXT2 | SLWX, dst, src1, src2, r);
817}
818
819
820void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
821 x_form(EXT2 | SRAW, ra, rs, rb, r);
822}
823
824
825void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
826 rlwnm(ra, rs, rb, 0, 31, r);
827}
828
829
830void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
831 rlwinm(ra, rs, sh, 0, 31, r);
832}
833
834
835void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
836 rlwinm(ra, rs, 32 - sh, 0, 31, r);
837}
838
839
840void Assembler::subi(Register dst, Register src, const Operand& imm) {
841 addi(dst, src, Operand(-(imm.imm_)));
842}
843
844void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
845 RCBit r) {
846 xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
847}
848
849
850void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
851 // a special xo_form
852 emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
853}
854
855
856void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
857 RCBit r) {
858 xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
859}
860
861
862void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
863 RCBit r) {
864 xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
865}
866
867
868void Assembler::subfic(Register dst, Register src, const Operand& imm) {
869 d_form(SUBFIC, dst, src, imm.imm_, true);
870}
871
872
873void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
874 RCBit r) {
875 xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
876}
877
878
879// Multiply low word
880void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
881 RCBit r) {
882 xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
883}
884
885
886// Multiply hi word
887void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
888 xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
889}
890
891
892// Multiply hi word unsigned
893void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
894 xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
895}
896
897
898// Divide word
899void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
900 RCBit r) {
901 xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
902}
903
904
905// Divide word unsigned
906void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
907 RCBit r) {
908 xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
909}
910
911
912void Assembler::addi(Register dst, Register src, const Operand& imm) {
913 DCHECK(!src.is(r0)); // use li instead to show intent
914 d_form(ADDI, dst, src, imm.imm_, true);
915}
916
917
918void Assembler::addis(Register dst, Register src, const Operand& imm) {
919 DCHECK(!src.is(r0)); // use lis instead to show intent
920 d_form(ADDIS, dst, src, imm.imm_, true);
921}
922
923
924void Assembler::addic(Register dst, Register src, const Operand& imm) {
925 d_form(ADDIC, dst, src, imm.imm_, true);
926}
927
928
929void Assembler::andi(Register ra, Register rs, const Operand& imm) {
930 d_form(ANDIx, rs, ra, imm.imm_, false);
931}
932
933
934void Assembler::andis(Register ra, Register rs, const Operand& imm) {
935 d_form(ANDISx, rs, ra, imm.imm_, false);
936}
937
938
939void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
940 x_form(EXT2 | NORX, dst, src1, src2, r);
941}
942
943
944void Assembler::notx(Register dst, Register src, RCBit r) {
945 x_form(EXT2 | NORX, dst, src, src, r);
946}
947
948
949void Assembler::ori(Register ra, Register rs, const Operand& imm) {
950 d_form(ORI, rs, ra, imm.imm_, false);
951}
952
953
954void Assembler::oris(Register dst, Register src, const Operand& imm) {
955 d_form(ORIS, src, dst, imm.imm_, false);
956}
957
958
959void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
960 x_form(EXT2 | ORX, dst, src1, src2, rc);
961}
962
963
964void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
965 x_form(EXT2 | ORC, dst, src1, src2, rc);
966}
967
968
969void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
970 intptr_t imm16 = src2.imm_;
971#if V8_TARGET_ARCH_PPC64
972 int L = 1;
973#else
974 int L = 0;
975#endif
976 DCHECK(is_int16(imm16));
977 DCHECK(cr.code() >= 0 && cr.code() <= 7);
978 imm16 &= kImm16Mask;
979 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
980}
981
982
983void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
984 uintptr_t uimm16 = src2.imm_;
985#if V8_TARGET_ARCH_PPC64
986 int L = 1;
987#else
988 int L = 0;
989#endif
990 DCHECK(is_uint16(uimm16));
991 DCHECK(cr.code() >= 0 && cr.code() <= 7);
992 uimm16 &= kImm16Mask;
993 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
994}
995
996
997void Assembler::cmp(Register src1, Register src2, CRegister cr) {
998#if V8_TARGET_ARCH_PPC64
999 int L = 1;
1000#else
1001 int L = 0;
1002#endif
1003 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1004 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
1005 src2.code() * B11);
1006}
1007
1008
1009void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
1010#if V8_TARGET_ARCH_PPC64
1011 int L = 1;
1012#else
1013 int L = 0;
1014#endif
1015 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1016 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
1017 src2.code() * B11);
1018}
1019
1020
1021void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
1022 intptr_t imm16 = src2.imm_;
1023 int L = 0;
1024 int pos = pc_offset();
1025 DCHECK(is_int16(imm16));
1026 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1027 imm16 &= kImm16Mask;
1028
1029 // For cmpwi against 0, save position and cr for later examination
1030 // of potential optimization.
1031 if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
1032 optimizable_cmpi_pos_ = pos;
1033 cmpi_cr_ = cr;
1034 }
1035 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
1036}
1037
1038
1039void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
1040 uintptr_t uimm16 = src2.imm_;
1041 int L = 0;
1042 DCHECK(is_uint16(uimm16));
1043 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1044 uimm16 &= kImm16Mask;
1045 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
1046}
1047
1048
1049void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
1050 int L = 0;
1051 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1052 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
1053 src2.code() * B11);
1054}
1055
1056
1057void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
1058 int L = 0;
1059 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1060 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
1061 src2.code() * B11);
1062}
1063
1064
1065void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
1066 emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1067 cb * B6);
1068}
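// isel semantics, roughly: rt = (CR bit "cb" set) ? ra : rb, with the usual
// special case that ra encoded as 0 reads as the value zero rather than r0.
// Callers are expected to have checked CpuFeatures::IsSupported(ISELECT)
// (probed for POWER7/POWER8 above) before emitting this.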
1069
1070
1071// Pseudo op - load immediate
1072void Assembler::li(Register dst, const Operand& imm) {
1073 d_form(ADDI, dst, r0, imm.imm_, true);
1074}
1075
1076
1077void Assembler::lis(Register dst, const Operand& imm) {
1078 d_form(ADDIS, dst, r0, imm.imm_, true);
1079}
1080
1081
1082// Pseudo op - move register
1083void Assembler::mr(Register dst, Register src) {
1084 // actually or(dst, src, src)
1085 orx(dst, src, src);
1086}
1087
1088
1089void Assembler::lbz(Register dst, const MemOperand& src) {
1090 DCHECK(!src.ra_.is(r0));
1091 d_form(LBZ, dst, src.ra(), src.offset(), true);
1092}
1093
1094
1095void Assembler::lbzx(Register rt, const MemOperand& src) {
1096 Register ra = src.ra();
1097 Register rb = src.rb();
1098 DCHECK(!ra.is(r0));
1099 emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1100 LeaveRC);
1101}
1102
1103
1104void Assembler::lbzux(Register rt, const MemOperand& src) {
1105 Register ra = src.ra();
1106 Register rb = src.rb();
1107 DCHECK(!ra.is(r0));
1108 emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1109 LeaveRC);
1110}
1111
1112
1113void Assembler::lhz(Register dst, const MemOperand& src) {
1114 DCHECK(!src.ra_.is(r0));
1115 d_form(LHZ, dst, src.ra(), src.offset(), true);
1116}
1117
1118
1119void Assembler::lhzx(Register rt, const MemOperand& src) {
1120 Register ra = src.ra();
1121 Register rb = src.rb();
1122 DCHECK(!ra.is(r0));
1123 emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1124 LeaveRC);
1125}
1126
1127
1128void Assembler::lhzux(Register rt, const MemOperand& src) {
1129 Register ra = src.ra();
1130 Register rb = src.rb();
1131 DCHECK(!ra.is(r0));
1132 emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1133 LeaveRC);
1134}
1135
1136
1137void Assembler::lhax(Register rt, const MemOperand& src) {
1138 Register ra = src.ra();
1139 Register rb = src.rb();
1140 DCHECK(!ra.is(r0));
1141 emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
1142}
1143
1144
1145void Assembler::lwz(Register dst, const MemOperand& src) {
1146 DCHECK(!src.ra_.is(r0));
1147 d_form(LWZ, dst, src.ra(), src.offset(), true);
1148}
1149
1150
1151void Assembler::lwzu(Register dst, const MemOperand& src) {
1152 DCHECK(!src.ra_.is(r0));
1153 d_form(LWZU, dst, src.ra(), src.offset(), true);
1154}
1155
1156
1157void Assembler::lwzx(Register rt, const MemOperand& src) {
1158 Register ra = src.ra();
1159 Register rb = src.rb();
1160 DCHECK(!ra.is(r0));
1161 emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1162 LeaveRC);
1163}
1164
1165
1166void Assembler::lwzux(Register rt, const MemOperand& src) {
1167 Register ra = src.ra();
1168 Register rb = src.rb();
1169 DCHECK(!ra.is(r0));
1170 emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1171 LeaveRC);
1172}
1173
1174
1175void Assembler::lha(Register dst, const MemOperand& src) {
1176 DCHECK(!src.ra_.is(r0));
1177 d_form(LHA, dst, src.ra(), src.offset(), true);
1178}
1179
1180
1181void Assembler::lwa(Register dst, const MemOperand& src) {
1182#if V8_TARGET_ARCH_PPC64
1183 int offset = src.offset();
1184 DCHECK(!src.ra_.is(r0));
1185 CHECK(!(offset & 3) && is_int16(offset));
1186 offset = kImm16Mask & offset;
1187 emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
1188#else
1189 lwz(dst, src);
1190#endif
1191}
1192
1193
1194void Assembler::lwax(Register rt, const MemOperand& src) {
1195#if V8_TARGET_ARCH_PPC64
1196 Register ra = src.ra();
1197 Register rb = src.rb();
1198 DCHECK(!ra.is(r0));
1199 emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
1200#else
1201 lwzx(rt, src);
1202#endif
1203}
1204
1205
1206void Assembler::stb(Register dst, const MemOperand& src) {
1207 DCHECK(!src.ra_.is(r0));
1208 d_form(STB, dst, src.ra(), src.offset(), true);
1209}
1210
1211
1212void Assembler::stbx(Register rs, const MemOperand& src) {
1213 Register ra = src.ra();
1214 Register rb = src.rb();
1215 DCHECK(!ra.is(r0));
1216 emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1217 LeaveRC);
1218}
1219
1220
1221void Assembler::stbux(Register rs, const MemOperand& src) {
1222 Register ra = src.ra();
1223 Register rb = src.rb();
1224 DCHECK(!ra.is(r0));
1225 emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1226 LeaveRC);
1227}
1228
1229
1230void Assembler::sth(Register dst, const MemOperand& src) {
1231 DCHECK(!src.ra_.is(r0));
1232 d_form(STH, dst, src.ra(), src.offset(), true);
1233}
1234
1235
1236void Assembler::sthx(Register rs, const MemOperand& src) {
1237 Register ra = src.ra();
1238 Register rb = src.rb();
1239 DCHECK(!ra.is(r0));
1240 emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1241 LeaveRC);
1242}
1243
1244
1245void Assembler::sthux(Register rs, const MemOperand& src) {
1246 Register ra = src.ra();
1247 Register rb = src.rb();
1248 DCHECK(!ra.is(r0));
1249 emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1250 LeaveRC);
1251}
1252
1253
1254void Assembler::stw(Register dst, const MemOperand& src) {
1255 DCHECK(!src.ra_.is(r0));
1256 d_form(STW, dst, src.ra(), src.offset(), true);
1257}
1258
1259
1260void Assembler::stwu(Register dst, const MemOperand& src) {
1261 DCHECK(!src.ra_.is(r0));
1262 d_form(STWU, dst, src.ra(), src.offset(), true);
1263}
1264
1265
1266void Assembler::stwx(Register rs, const MemOperand& src) {
1267 Register ra = src.ra();
1268 Register rb = src.rb();
1269 DCHECK(!ra.is(r0));
1270 emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1271 LeaveRC);
1272}
1273
1274
1275void Assembler::stwux(Register rs, const MemOperand& src) {
1276 Register ra = src.ra();
1277 Register rb = src.rb();
1278 DCHECK(!ra.is(r0));
1279 emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1280 LeaveRC);
1281}
1282
1283
1284void Assembler::extsb(Register rs, Register ra, RCBit rc) {
1285 emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
1286}
1287
1288
1289void Assembler::extsh(Register rs, Register ra, RCBit rc) {
1290 emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
1291}
1292
1293
1294void Assembler::extsw(Register rs, Register ra, RCBit rc) {
1295#if V8_TARGET_ARCH_PPC64
1296 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
1297#else
1298 // nop on 32-bit
1299 DCHECK(rs.is(ra) && rc == LeaveRC);
1300#endif
1301}
1302
1303
1304void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
1305 emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
1306}
1307
1308
1309void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
1310 x_form(EXT2 | ANDCX, dst, src1, src2, rc);
1311}
1312
1313
1314#if V8_TARGET_ARCH_PPC64
1315// 64bit specific instructions
1316void Assembler::ld(Register rd, const MemOperand& src) {
1317 int offset = src.offset();
1318 DCHECK(!src.ra_.is(r0));
1319 CHECK(!(offset & 3) && is_int16(offset));
1320 offset = kImm16Mask & offset;
1321 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
1322}
1323
1324
1325void Assembler::ldx(Register rd, const MemOperand& src) {
1326 Register ra = src.ra();
1327 Register rb = src.rb();
1328 DCHECK(!ra.is(r0));
1329 emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
1330}
1331
1332
1333void Assembler::ldu(Register rd, const MemOperand& src) {
1334 int offset = src.offset();
1335 DCHECK(!src.ra_.is(r0));
1336 CHECK(!(offset & 3) && is_int16(offset));
1337 offset = kImm16Mask & offset;
1338 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
1339}
1340
1341
1342void Assembler::ldux(Register rd, const MemOperand& src) {
1343 Register ra = src.ra();
1344 Register rb = src.rb();
1345 DCHECK(!ra.is(r0));
1346 emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
1347}
1348
1349
1350void Assembler::std(Register rs, const MemOperand& src) {
1351 int offset = src.offset();
1352 DCHECK(!src.ra_.is(r0));
1353 CHECK(!(offset & 3) && is_int16(offset));
1354 offset = kImm16Mask & offset;
1355 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
1356}
1357
1358
1359void Assembler::stdx(Register rs, const MemOperand& src) {
1360 Register ra = src.ra();
1361 Register rb = src.rb();
1362 DCHECK(!ra.is(r0));
1363 emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
1364}
1365
1366
1367void Assembler::stdu(Register rs, const MemOperand& src) {
1368 int offset = src.offset();
1369 DCHECK(!src.ra_.is(r0));
1370 CHECK(!(offset & 3) && is_int16(offset));
1371 offset = kImm16Mask & offset;
1372 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
1373}
1374
1375
1376void Assembler::stdux(Register rs, const MemOperand& src) {
1377 Register ra = src.ra();
1378 Register rb = src.rb();
1379 DCHECK(!ra.is(r0));
1380 emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
1381}
1382
1383
1384void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
1385 md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
1386}
1387
1388
1389void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
1390 md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
1391}
1392
1393
1394void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
1395 mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
1396}
1397
1398
1399void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
1400 md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
1401}
1402
1403
1404void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
1405 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1406 rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
1407}
1408
1409
1410void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
1411 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1412 rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
1413}
1414
1415
1416void Assembler::clrrdi(Register dst, Register src, const Operand& val,
1417 RCBit rc) {
1418 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1419 rldicr(dst, src, 0, 63 - val.imm_, rc);
1420}
1421
1422
1423void Assembler::clrldi(Register dst, Register src, const Operand& val,
1424 RCBit rc) {
1425 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1426 rldicl(dst, src, 0, val.imm_, rc);
1427}
1428
1429
1430void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
1431 md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
1432}
1433
1434
1435void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
1436 int sh0_4 = sh & 0x1f;
1437 int sh5 = (sh >> 5) & 0x1;
1438
1439 emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
1440 sh5 * B1 | r);
1441}
1442
1443
1444void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
1445 x_form(EXT2 | SRDX, dst, src1, src2, r);
1446}
1447
1448
1449void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
1450 x_form(EXT2 | SLDX, dst, src1, src2, r);
1451}
1452
1453
1454void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
1455 x_form(EXT2 | SRAD, ra, rs, rb, r);
1456}
1457
1458
1459void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
1460 rldcl(ra, rs, rb, 0, r);
1461}
1462
1463
1464void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
1465 rldicl(ra, rs, sh, 0, r);
1466}
1467
1468
1469void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
1470 rldicl(ra, rs, 64 - sh, 0, r);
1471}
1472
1473
1474void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
1475 x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
1476}
1477
1478
1479void Assembler::popcntd(Register ra, Register rs) {
1480 emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
1481}
1482
1483
1484void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
1485 RCBit r) {
1486 xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
1487}
1488
1489
1490void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
1491 RCBit r) {
1492 xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
1493}
1494
1495
1496void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
1497 RCBit r) {
1498 xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
1499}
1500#endif
1501
1502
1503// Function descriptor for AIX.
1504// Code address skips the function descriptor "header".
1505// TOC and static chain are ignored and set to 0.
1506void Assembler::function_descriptor() {
1507#if ABI_USES_FUNCTION_DESCRIPTORS
1508 Label instructions;
1509 DCHECK(pc_offset() == 0);
1510 emit_label_addr(&instructions);
1511 dp(0);
1512 dp(0);
1513 bind(&instructions);
1514#endif
1515}
1516
1517
1518int Assembler::instructions_required_for_mov(Register dst,
1519 const Operand& src) const {
1520 bool canOptimize =
1521 !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
1522 if (use_constant_pool_for_mov(dst, src, canOptimize)) {
1523 if (ConstantPoolAccessIsInOverflow()) {
1524 return kMovInstructionsConstantPool + 1;
1525 }
1526 return kMovInstructionsConstantPool;
1527 }
1528 DCHECK(!canOptimize);
1529 return kMovInstructionsNoConstantPool;
1530}
1531
1532
1533bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
1534 bool canOptimize) const {
1535 if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
1536 // If there is no constant pool available, we must use a mov
1537 // immediate sequence.
1538 return false;
1539 }
1540
1541 intptr_t value = src.immediate();
1542#if V8_TARGET_ARCH_PPC64
1543 bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
1544#else
1545 bool allowOverflow = !(canOptimize || dst.is(r0));
1546#endif
1547 if (canOptimize && is_int16(value)) {
1548 // Prefer a single-instruction load-immediate.
1549 return false;
1550 }
1551 if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
1552 // Prefer non-relocatable two-instruction bitwise-mov32 over
1553 // overflow sequence.
1554 return false;
1555 }
1556
1557 return true;
1558}
1559
1560
1561void Assembler::EnsureSpaceFor(int space_needed) {
1562 if (buffer_space() <= (kGap + space_needed)) {
1563 GrowBuffer(space_needed);
1564 }
1565}
1566
1567
1568bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1569 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1570 if (assembler != NULL && assembler->predictable_code_size()) return true;
1571 return assembler->serializer_enabled();
1572 } else if (RelocInfo::IsNone(rmode_)) {
1573 return false;
1574 }
1575 return true;
1576}
1577
1578
1579// Primarily used for loading constants.
1580// This should really move to macro-assembler, as it
1581// is really a pseudo instruction.
1582// Some usages of this intend for a FIXED_SEQUENCE to be used.
1583// TODO: break this dependency so we can optimize mov() in general
1584// and only use the generic version when we require a fixed sequence.
1585void Assembler::mov(Register dst, const Operand& src) {
1586 intptr_t value = src.immediate();
1587 bool relocatable = src.must_output_reloc_info(this);
1588 bool canOptimize;
1589
1590 canOptimize =
1591 !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
1592
1593 if (use_constant_pool_for_mov(dst, src, canOptimize)) {
1594 DCHECK(is_constant_pool_available());
1595 if (relocatable) {
1596 RecordRelocInfo(src.rmode_);
1597 }
1598 ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
1599#if V8_TARGET_ARCH_PPC64
1600 if (access == ConstantPoolEntry::OVERFLOWED) {
1601 addis(dst, kConstantPoolRegister, Operand::Zero());
1602 ld(dst, MemOperand(dst, 0));
1603 } else {
1604 ld(dst, MemOperand(kConstantPoolRegister, 0));
1605 }
1606#else
1607 if (access == ConstantPoolEntry::OVERFLOWED) {
1608 addis(dst, kConstantPoolRegister, Operand::Zero());
1609 lwz(dst, MemOperand(dst, 0));
1610 } else {
1611 lwz(dst, MemOperand(kConstantPoolRegister, 0));
1612 }
1613#endif
1614 return;
1615 }
1616
1617 if (canOptimize) {
1618 if (is_int16(value)) {
1619 li(dst, Operand(value));
1620 } else {
1621 uint16_t u16;
1622#if V8_TARGET_ARCH_PPC64
1623 if (is_int32(value)) {
1624#endif
1625 lis(dst, Operand(value >> 16));
1626#if V8_TARGET_ARCH_PPC64
1627 } else {
1628 if (is_int48(value)) {
1629 li(dst, Operand(value >> 32));
1630 } else {
1631 lis(dst, Operand(value >> 48));
1632 u16 = ((value >> 32) & 0xffff);
1633 if (u16) {
1634 ori(dst, dst, Operand(u16));
1635 }
1636 }
1637 sldi(dst, dst, Operand(32));
1638 u16 = ((value >> 16) & 0xffff);
1639 if (u16) {
1640 oris(dst, dst, Operand(u16));
1641 }
1642 }
1643#endif
1644 u16 = (value & 0xffff);
1645 if (u16) {
1646 ori(dst, dst, Operand(u16));
1647 }
1648 }
1649 return;
1650 }
1651
1652 DCHECK(!canOptimize);
1653 if (relocatable) {
1654 RecordRelocInfo(src.rmode_);
1655 }
1656 bitwise_mov(dst, value);
1657}
1658
1659
1660void Assembler::bitwise_mov(Register dst, intptr_t value) {
1661 BlockTrampolinePoolScope block_trampoline_pool(this);
1662#if V8_TARGET_ARCH_PPC64
1663 int32_t hi_32 = static_cast<int32_t>(value >> 32);
1664 int32_t lo_32 = static_cast<int32_t>(value);
1665 int hi_word = static_cast<int>(hi_32 >> 16);
1666 int lo_word = static_cast<int>(hi_32 & 0xffff);
1667 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1668 ori(dst, dst, Operand(lo_word));
1669 sldi(dst, dst, Operand(32));
1670 hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
1671 lo_word = static_cast<int>(lo_32 & 0xffff);
1672 oris(dst, dst, Operand(hi_word));
1673 ori(dst, dst, Operand(lo_word));
1674#else
1675 int hi_word = static_cast<int>(value >> 16);
1676 int lo_word = static_cast<int>(value & 0xffff);
1677 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1678 ori(dst, dst, Operand(lo_word));
1679#endif
1680}
1681
1682
1683void Assembler::bitwise_mov32(Register dst, int32_t value) {
1684 BlockTrampolinePoolScope block_trampoline_pool(this);
1685 int hi_word = static_cast<int>(value >> 16);
1686 int lo_word = static_cast<int>(value & 0xffff);
1687 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1688 ori(dst, dst, Operand(lo_word));
1689}
1690
1691
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}


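// Load dst with the label's offset from the start of the code object. For an
// unbound label, a dummy opcode carrying the chain link is emitted and the
// destination register is stored in the following word; both are patched when
// the label is bound.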
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}


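// Compute dst = base + label position + delta. Unbound labels use the same
// dummy-opcode scheme as mov_label_offset, with the register codes and delta
// packed into the second word.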
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    DCHECK(is_int16(delta));

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
  }
}


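// Load dst with the absolute address of the label. The value is kept
// buffer-relative until EmitRelocations rewrites it to a real address.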
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK(kMovInstructionsNoConstantPool >= 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}


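// Emit a pointer-sized jump table entry holding the label's address
// (buffer-relative until EmitRelocations).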
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address. target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    nop();
#endif
  }
}


// Special register instructions
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}


void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}


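// mfspr/mtspr encode the SPR number with its two 5-bit halves swapped:
// LR (SPR 8) -> 256, CTR (SPR 9) -> 288, XER (SPR 1) -> 32.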
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}


void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}


void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK(static_cast<int>(bit) < 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}


void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }


#if V8_TARGET_ARCH_PPC64
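// Direct GPR <-> FPR moves via the VSX direct-move instructions
// (Power ISA 2.07, POWER8 and later).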
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif


// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}


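// 0x7d821008 is twge r2, r2, which traps unconditionally and serves as the
// breakpoint instruction.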
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }


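// Cache management and memory barrier instructions. lwsync is encoded as
// sync with L = 1.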
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}


void Assembler::sync() { emit(EXT2 | SYNC); }


void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }


void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}


void Assembler::isync() { emit(EXT1 | ISYNC); }


// Floating point support

void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(!ra.is(r0));
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(!ra.is(r0));
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}


void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}


void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}


void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}


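// Round-to-integer variants: frin rounds to nearest (ties away from zero),
// friz truncates toward zero, frip rounds toward +infinity and frim toward
// -infinity.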
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}


void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}


void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}


void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}


void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}


void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


// Pseudo instructions.
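// Marker nops are encoded as "ori reg, reg, 0" with a type-specific register
// so that IsNop() can recognize them later.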
void Assembler::nop(int type) {
  Register reg = r0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = r0;
      break;
    case GROUP_ENDING_NOP:
      reg = r2;
      break;
    case DEBUG_BREAK_NOP:
      reg = r3;
      break;
    default:
      UNIMPLEMENTED();
  }

  ori(reg, reg, Operand::Zero());
}


bool Assembler::IsNop(Instr instr, int type) {
  int reg = 0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = 0;
      break;
    case GROUP_ENDING_NOP:
      reg = 2;
      break;
    case DEBUG_BREAK_NOP:
      reg = 3;
      break;
    default:
      UNIMPLEMENTED();
  }
  return instr == (ORI | reg * B21 | reg * B16);
}


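// Grow the code buffer: at least 4 KB, doubling while under 1 MB, then in
// 1 MB steps, and always by enough to satisfy |needed|.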
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}


void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}


void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}


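// Write out the deferred relocation entries, converting buffer-relative
// internal references into absolute addresses now that all labels are bound.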
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = buffer_ + it->position();
    Code* code = NULL;
    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
      Memory::Address_at(pc) = buffer_ + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
      set_target_address_at(isolate(), pc, code, buffer_ + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }

  reloc_info_writer.Finish();
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit the trampoline once, we need to prevent
    // any further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit the jump, then the trampoline pool.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC