// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.

#include "src/ppc/assembler-ppc.h"

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether frim instruction is supported (POWER5+)
// For now we will just check for processors we know do not
// support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
#if V8_TARGET_ARCH_PPC64
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
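
// Illustrative note (not part of the original source): the bits probed above
// are meant to be queried before emitting optional instructions, along the
// lines of
//
//   if (CpuFeatures::IsSupported(ISELECT)) {
//     // emit isel (branchless select, POWER7 and later)
//   } else {
//     // emit a compare-and-branch fallback
//   }
//
// This is a sketch of the intended usage pattern, not code from this file.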


void CpuFeatures::PrintTarget() {
  const char* ppc_arch = NULL;

#if V8_TARGET_ARCH_PPC64
  ppc_arch = "ppc64";
#else
  ppc_arch = "ppc";
#endif

  printf("target %s\n", ppc_arch);
}


void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
                                 r8, r9, r10, r11, ip, r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}
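
// Illustrative note (not from the original source): a "specially coded"
// pointer is materialized with a full-width immediate sequence rather than a
// pc-relative form, e.g. on 32-bit PPC:
//
//   lis r8, addr@h        // high 16 bits of the address
//   ori r8, r8, addr@l    // low 16 bits of the address
//
// (64-bit builds use the five-instruction lis/ori/rldicr/oris/ori sequence
// checked by Is64BitLoadIntoR12 below.)  Because every embedded pointer is
// either such a sequence or a constant pool entry, IsCodedSpecially() can
// unconditionally return true.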


bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool) {
    Address constant_pool = host_->constant_pool();
    return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
  }
  return false;
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}


MemOperand::MemOperand(Register rn, int32_t offset) {
  ra_ = rn;
  rb_ = no_reg;
  offset_ = offset;
}


MemOperand::MemOperand(Register ra, Register rb) {
  ra_ = ra;
  rb_ = rb;
  offset_ = 0;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  ClearRecordedAstId();
  relocations_.reserve(128);
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() { Align(8); }


Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;
}


bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
}


bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }


Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RAValue(instr);
  return reg;
}


Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RBValue(instr);
  return reg;
}


#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000   lis     r12, 0
  // 618c0000   ori     r12, r12, 0
  // 798c07c6   rldicr  r12, r12, 32, 31
  // 658c00c3   oris    r12, r12, 195
  // 618ccd40   ori     r12, r12, 52544
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
          ((instr5 >> 16) == 0x618c));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553   lis     r12, 9555
  // 618c5000   ori     r12, r12, 20480
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
#endif


bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((instr & kExt2OpcodeMask) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((instr & kExt5OpcodeMask) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((instr & kExt1OpcodeMask) == CREQV));
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
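
// Illustrative sketch (not from the original source) of how the link chain
// works: each unbound reference stores, in its offset field, the distance to
// the previous reference to the same label, so the chain can be walked from
// Label::pos() via target_at() until a zero link (decoded as kEndOfChain) or
// a self-reference marks the first use.  When bind_to() finally knows the
// target position, it walks the chain and patches each site with
// target_at_put().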


// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundMovLabelAddrOpcode = 2 << 26,
  kUnboundJumpTableEntryOpcode = 3 << 26
};


int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  int opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}


void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 21) & 0x1f);
      Register base = Register::from_code((operands >> 16) & 0x1f);
      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_add32(dst, base, offset);
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kMovInstructionsNoConstantPool,
                          CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}


int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  // check which type of branch this is 16 or 26 bit offset
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
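
// Illustrative note (not from the original source) on the reach handling in
// bind_to(): a conditional branch (BCX) encodes a signed 16-bit byte offset,
// so it can only reach roughly +/-32KB from the branch site, while an
// unconditional branch (BX) has a signed 26-bit offset (roughly +/-32MB).
// When a fixup site is too far from the bound position for its encoding, the
// branch is redirected to a nearby trampoline slot that holds a long jump to
// the real target.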


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L, Condition cond) {
  DCHECK(L->is_bound());
  if (L->is_bound() == false) return false;

  int maxReach = ((cond == al) ? 26 : 16);
  int offset = L->pos() - pc_offset();

  return is_intn(offset, maxReach);
}


void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
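
// Illustrative note (not from the original source): the D-form helper above
// packs a PowerPC instruction roughly as
//
//   bits 0-5   primary opcode (already folded into |instr|)
//   bits 6-10  RT/RS field     -> rt.code() * B21
//   bits 11-15 RA field        -> ra.code() * B16
//   bits 16-31 16-bit immediate or displacement
//
// which is why the immediate must fit in 16 bits (signed or unsigned,
// depending on the instruction).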


void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
                       RCBit r) {
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}


void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}


void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1f;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}


// Branch instructions.


void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}


void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }


void Assembler::bctrl() { bcctr(BA, 0, SetLK); }


void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}


void Assembler::b(int branch_offset, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}


void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}


void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}


void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}


void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}


void Assembler::popcntw(Register ra, Register rs) {
  emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
}


void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}


void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
}


void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
}


void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
}


void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, val.imm_, 31, rc);
}
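
// Illustrative note (not from the original source): the shift/clear pseudo
// ops above are all special cases of rlwinm (rotate left word immediate then
// AND with mask).  For example, slwi(dst, src, 3) rotates left by 3 and keeps
// bits 0..28 of the result, i.e. a logical shift left by 3, while
// srwi(dst, src, 3) rotates left by 32 - 3 (a rotate right by 3) and keeps
// bits 3..31, i.e. a logical shift right by 3.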


void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
  emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
}


void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRWX, dst, src1, src2, r);
}


void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLWX, dst, src1, src2, r);
}


void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}


void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}


void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.imm_)));
}

void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}

void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}

void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}


void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}

void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}

void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}

void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}


void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}


// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}


// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}


// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}


// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}


// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}


void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}


void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}


void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}


void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}


void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}


void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}


void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}


void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}


void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}


void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}


void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORC, dst, src1, src2, rc);
}


void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}


// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}


void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}


// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}


void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lbzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lbzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lhzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lhzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lhax(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}


void Assembler::lwzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lwzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHA, dst, src.ra(), src.offset(), true);
}


void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}


void Assembler::lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
#else
  lwzx(rt, src);
#endif
}


void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}


void Assembler::stbx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stbux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}


void Assembler::sthx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::sthux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}


void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}


void Assembler::stwx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stwux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
}


void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
}


void Assembler::extsw(Register rs, Register ra, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
  // nop on 32-bit
  DCHECK(rs.is(ra) && rc == LeaveRC);
#endif
}


void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}


void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}


#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::ldx(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::ldux(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::stdx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::stdux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}


void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}


void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}


void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}


void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}


void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}


void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}


void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}


void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}


void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}


void Assembler::popcntd(Register ra, Register rs) {
  emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
}


void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}


void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif


// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK(pc_offset() == 0);
    emit_label_addr(&instructions);
    dp(0);
    dp(0);
    bind(&instructions);
  }
}
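
// Illustrative note (not from the original source): on ABIs that use function
// descriptors, a function symbol points at a small data block rather than at
// its first instruction, roughly
//
//   [0] entry point address   -> emitted here via emit_label_addr()
//   [1] TOC pointer           -> left as 0 by this code
//   [2] static chain pointer  -> left as 0 by this code
//
// so the three pointer-sized slots emitted above form that descriptor and the
// real code starts at the bound label.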


int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}


bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
#else
  bool allowOverflow = !(canOptimize || dst.is(r0));
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
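
// Illustrative summary (not from the original source) of the decision above:
// a constant goes to the embedded constant pool only when the pool exists and
// is currently available, the value is not a trivial 16-bit immediate that a
// single li can handle, and either the pool's primary region is still usable
// for this access or an overflow access is acceptable for this destination.
// Otherwise mov() falls back to an inline lis/ori-style immediate sequence.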


void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer(space_needed);
  }
}


bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return assembler->serializer_enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
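
// Illustrative sketch (not from the original source): with the constant pool
// unavailable, a 64-bit mov(dst, Operand(0x00c3cd4012345678)) expands along
// the lines of
//
//   lis   dst, 0x00c3        // bits 63-48
//   ori   dst, dst, 0xcd40   // bits 47-32
//   sldi  dst, dst, 32       // shift into the high word
//   oris  dst, dst, 0x1234   // bits 31-16
//   ori   dst, dst, 0x5678   // bits 15-0
//
// (zero halfwords may be skipped), while relocatable values take the
// fixed-length bitwise_mov() path so the sequence can be patched in place.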
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001668
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001669
void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
  int32_t hi_32 = static_cast<int32_t>(value >> 32);
  int32_t lo_32 = static_cast<int32_t>(value);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xffff);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
  lo_word = static_cast<int>(lo_32 & 0xffff);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
#else
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xffff);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
#endif
}


void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xffff);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}


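// Add the 32-bit immediate |value| to |src| using a fixed two-instruction
// addis/addic sequence. addic sign-extends its 16-bit operand, so when bit 15
// of |value| is set the high halfword is incremented by one to compensate
// (e.g. 0x12348765 is emitted as addis with 0x1235 followed by addic with
// 0x8765, which sign-extends to -0x789b).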
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}


void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}


void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    DCHECK(is_int16(delta));

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
  }
}


void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK(kMovInstructionsNoConstantPool >= 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}


void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address. target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    nop();
#endif
  }
}


// Special register instructions
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}


void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}


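// Note that the 10-bit SPR field of mfspr/mtspr encodes the two 5-bit halves
// of the SPR number swapped, so LR (SPR 8) appears as 256, CTR (SPR 9) as 288
// and XER (SPR 1) as 32 below.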
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}


void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}


void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK(static_cast<int>(bit) < 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}


void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }


#if V8_TARGET_ARCH_PPC64
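// Direct moves between general purpose and floating point registers
// (mfvsrd/mtvsrd family, available from POWER8).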
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif


// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}


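// The immediate is currently ignored; the fixed encoding 0x7d821008 is
// "twge r2, r2", which always traps.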
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }


void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}


void Assembler::sync() { emit(EXT2 | SYNC); }


void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }


void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}


void Assembler::isync() { emit(EXT1 | ISYNC); }


// Floating point support

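// The D-form loads/stores below take a signed 16-bit displacement off ra;
// the X-form variants (suffix x/ux) take an index register rb instead.
// ra must not be r0, since r0 as a base operand is read as zero.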
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(!ra.is(r0));
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(!ra.is(r0));
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}


void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}


void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}


void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}


void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}


void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}


void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}


void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}


void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}


void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


// Pseudo instructions.
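// A nop is encoded as "ori reg, reg, 0"; the register used (r0, r2 or r3)
// marks the nop type so that IsNop() can recognize it later.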
void Assembler::nop(int type) {
  Register reg = r0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = r0;
      break;
    case GROUP_ENDING_NOP:
      reg = r2;
      break;
    case DEBUG_BREAK_NOP:
      reg = r3;
      break;
    default:
      UNIMPLEMENTED();
  }

  ori(reg, reg, Operand::Zero());
}


bool Assembler::IsNop(Instr instr, int type) {
  int reg = 0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = 0;
      break;
    case GROUP_ENDING_NOP:
      reg = 2;
      break;
    case DEBUG_BREAK_NOP:
      reg = 3;
      break;
    default:
      UNIMPLEMENTED();
  }
  return instr == (ORI | reg * B21 | reg * B16);
}


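// Grows the code buffer. Instructions and relocation information are copied
// to the new buffer; internal references and deferred relocation entries stay
// buffer-relative, so no further fix-up is needed until EmitRelocations.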
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}


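// Raw data emitters. These write directly into the instruction stream and do
// not record any relocation information themselves.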
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}


void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}


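// Writes out all deferred relocation entries. Internal references, which are
// kept relative to the buffer while code is being emitted, are converted to
// absolute addresses now that they are guaranteed to be bound.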
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = buffer_ + it->position();
    Code* code = NULL;
    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
      Memory::Address_at(pc) = buffer_ + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
      set_target_address_at(isolate(), pc, code, buffer_ + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }

  reloc_info_writer.Finish();
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


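// Emits the trampoline pool when branches are being tracked: a branch over
// the pool followed by one branch slot per tracked branch. Each slot
// initially branches just past the pool; slots are retargeted later when
// branches that cannot reach their destination directly are bound.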
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit the trampoline pool once, we need to
    // prevent any further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit the jump, then we emit the trampoline pool.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC