//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {

    // Copies between a 128-bit (vec4) register and a 32-bit register are not
    // supported; on this path both operands must be 32-bit registers.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg, int64_t Imm) const {
  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

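// Note on the branch-condition encoding produced below (inferred from how
// InsertBranch() and ReverseBranchCondition() consume Cond later in this
// file): Cond[0] carries the first PRED_X operand, Cond[1] carries the
// PRED_X condition-code immediate (one of the OPCODE_IS_* values), and
// Cond[2] carries the predicate-select register (PRED_SEL_ONE or
// PRED_SEL_ZERO).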
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (static_cast<MachineInstr *>(I)->getOpcode() != AMDGPU::JUMP) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      static_cast<MachineInstr *>(--I)->getOpcode() != AMDGPU::JUMP) {
    if (LastOpc == AMDGPU::JUMP) {
      if (!isPredicated(LastInst)) {
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        MachineInstr *predSet = I;
        while (!isPredicateSetter(predSet->getOpcode())) {
          predSet = --I;
        }
        TBB = LastInst->getOperand(0).getMBB();
        Cond.push_back(predSet->getOperand(1));
        Cond.push_back(predSet->getOperand(2));
        Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
        return false;
      }
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a conditional JUMP followed by an unconditional
  // JUMP, handle it.
  if (SecondLastOpc == AMDGPU::JUMP &&
      isPredicated(SecondLastInst) &&
      LastOpc == AMDGPU::JUMP &&
      !isPredicated(LastInst)) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB).addReg(0);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB).addReg(0);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave the PRED* instructions in place;
  // they may be needed for predicating instructions later.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

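// Summary of the two helpers below: indirect addressing uses a contiguous
// range of register indices. getIndirectIndexBegin() returns the first index
// that is safe to use (one past the highest live-in register, or 0 when there
// are no live-ins), and getIndirectIndexEnd() returns the last index, derived
// from the frame layout. Both return -1 when the function has no stack
// objects.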
int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                                  const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
                 static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

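// The two builders below lower indirect accesses to a two-instruction
// pattern: a MOVA_INT_eg that loads the offset into the AR_X address register
// (with its WRITE operand cleared), followed by a MOV whose destination (for
// writes) or source (for reads) is marked relative so that it is addressed
// through AR_X.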
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::DST_REL, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                                       AMDGPU::AR_X,
                                                       OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::SRC0_REL, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

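// buildDefaultInstruction() creates an ALU instruction with the modifier
// operands of the native encoding filled in with default values, so callers
// only have to supply the opcode and registers. A minimal usage sketch,
// mirroring copyPhysReg() above:
//
//   MachineInstr *NewMI =
//       buildDefaultInstruction(MBB, I, AMDGPU::MOV, DestReg, SrcReg);
//
// Individual operands can then be adjusted afterwards, e.g. with
// setImmOperand() (see buildMovImm() below) or setIsKill().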
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                       // $last
     .addReg(AMDGPU::PRED_SEL_OFF)    // $pred_sel
     .addImm(0);                      // $literal

  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
                                                 "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

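// A short usage sketch of the flag helpers below, mirroring InsertBranch()
// and RemoveBranch() above: flags are addressed by the instruction, the
// source-operand index they apply to, and an MO_FLAG_* value, e.g.
//
//   addFlag(PredSet, 0, MO_FLAG_PUSH);    // set the push flag
//   clearFlag(PredSet, 0, MO_FLAG_PUSH);  // clear it again
//
// For instructions with native operand encoding the flag maps to a dedicated
// immediate operand; otherwise all flags are packed into a single flag
// operand.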
bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}