//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

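// Copies between 128-bit (vec4) registers are expanded into four
// per-channel 32-bit MOVs, each carrying an implicit def of the full
// destination register. All other copies must be between 32-bit values.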
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // We can't copy vec4 registers.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg,
                                             int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

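// Fetch instructions are served by two caches on R600-family parts: a
// vertex cache and a texture cache. The helpers below classify an
// instruction by the cache it goes through. On subtargets without a vertex
// cache, and (per the checks below) in compute shaders, VTX fetches are
// issued through the texture cache instead.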
bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

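// Collect the source operands of \p MI as (operand, selector/immediate)
// pairs: ALU_CONST sources are paired with their const selector, literal
// sources with their immediate value, and plain register sources with 0.
// DOT_4 is handled specially since it carries per-channel source operands.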
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

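// Flatten the sources of \p MI into (register index, channel) pairs for the
// bank-swizzle checks below. Sources that cannot cause a read-port conflict
// (non-GPR indices above 127, or values produced within the current packet
// and hence found in \p PV) become a (-1, 0) dummy entry, and the result is
// padded with dummies up to the three ALU source slots.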
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV)
    const {
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    unsigned Chan = RI.getHWRegChan(Reg);
    if (Index > 127) {
      Result.push_back(DummyPair);
      continue;
    }
    if (PV.find(Reg) != PV.end()) {
      Result.push_back(DummyPair);
      continue;
    }
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

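// Reorder the three source slots of an instruction according to one of the
// six bank swizzles. The enumerator digits appear to give the slot each
// source moves to; e.g. ALU_VEC_120 sends src0 to slot 1, src1 to slot 2
// and src2 to slot 0, so:
//   Swizzle({a, b, c}, ALU_VEC_120) == {c, a, b}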
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012:
    break;
  case R600InstrInfo::ALU_VEC_021:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

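// Check that the first \p CheckedSize instructions of the packet can fetch
// their sources without a read-port conflict. Vector[chan][slot] records
// which register index source slot \p slot reads on channel \p chan; two
// different indices competing for the same (channel, slot) port make the
// swizzle combination illegal.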
static bool
isLegal(const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
        const std::vector<R600InstrInfo::BankSwizzle> &Swz,
        unsigned CheckedSize) {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0; i < CheckedSize; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0)
        continue;
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return false;
    }
  }
  return true;
}

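// Backtracking search for a bank swizzle assignment: try every swizzle for
// the instruction at \p Depth, keeping only prefixes that isLegal() accepts,
// and recurse into the next instruction. SwzCandidate holds the assignment
// found so far; the search resumes from the swizzle already stored there.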
static bool recursiveFitsFPLimitation(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Depth = 0) {
  if (!isLegal(IGSrcs, SwzCandidate, Depth))
    return false;
  if (IGSrcs.size() == Depth)
    return true;
  unsigned i = SwzCandidate[Depth];
  for (; i < 6; i++) {
    SwzCandidate[Depth] = (R600InstrInfo::BankSwizzle) i;
    if (recursiveFitsFPLimitation(IGSrcs, SwzCandidate, Depth + 1))
      return true;
  }
  SwzCandidate[Depth] = R600InstrInfo::ALU_VEC_012;
  return false;
}

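// Given the instruction group \p IG, find a bank swizzle for each member so
// that all sources can be fetched within the register file's read-port
// limits. On success the chosen swizzles are returned in \p ValidSwizzle.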
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle)
    const {
  // TODO: support a source shared between src0 and src1.

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
                           IG[i]->getOperand(Op).getImm());
  }
  return recursiveFitsFPLimitation(IGSrcs, ValidSwizzle);
}

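// Constants are fetched in 64-bit halves of a 128-bit slot (the xy or zw
// half, derived below from the packed (index, channel) encoding). The
// hardware appears to allow an ALU instruction group to address at most two
// distinct halves, which is what this check enforces.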
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

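// Gather every constant read by the ALU instructions in \p MIs (both
// ALU_CONST selectors and KC0/KC1 kcache registers, packed as
// (index << 2) | channel) and defer to fitsConstReadLimitations.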
bool
R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
  std::vector<unsigned> Consts;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      const std::pair<MachineOperand *, int64_t> &Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions in place.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in
  // the backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
    const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred)
    const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

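// The two helpers below bound the range of register indices used for
// indirect (relative) addressing: the beginning is one past the highest
// live-in register index (or 0 with no live-ins), and the end adds the
// frame-index offset of the function's stack objects. Both return -1 when
// there are no stack objects.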
int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported.
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2.
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrStoreRegClass(
                                                    unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

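// Indirect reads and writes go through the AR_X address register: a
// MOVA_INT_eg first loads the offset into AR_X (with its own write
// disabled), then a MOV with dst_rel or src0_rel set performs the
// relatively addressed access. AR_X is killed by the MOV, so each access
// rebuilds it.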
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

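// Upper bound on the number of ALU slots packed into a single ALU clause by
// the clause-splitting code. 115 is a conservative figure, presumably
// leaving headroom below the hardware's clause size limit for literals and
// clause overhead.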
unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)                    // $literal
     .addImm(0);                   // $bank_swizzle

  return MIB;
}

#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

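// Map a scalar ALU operand name to its per-channel variant for the given
// DOT_4 slot, e.g. (AMDGPU::OpName::src0, 2) -> AMDGPU::OpName::src0_Z.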
static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

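// Expand one slot of a DOT_4 pseudo into a standalone DOT4 instruction,
// copying every per-channel operand of the requested slot onto the operands
// of a freshly built default instruction.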
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  // Operand 20 is $bank_swizzle in the layout built by
  // buildDefaultInstruction above.
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

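// Return the operand holding \p Flag for source \p SrcIdx of \p MI. With
// native operand encoding each flag lives in its own immediate operand,
// looked up by name; otherwise all flags are packed into the single operand
// at GET_FLAG_OPERAND_IDX and \p Flag must be 0.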
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
             "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}