//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600FrameLowering.h"
#include "R600RegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
  : AMDGPUInstrInfo(ST), RI(), ST(ST) {}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default:
    return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
  if (isALUInstr(MI.getOpcode()))
    return true;
  if (isVector(MI) || isCubeOp(MI.getOpcode()))
    return true;
  switch (MI.getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
  return isTransOnly(MI.getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
  return isVectorOnly(MI.getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
         usesVertexCache(MI.getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
          usesVertexCache(MI.getOpcode())) ||
         usesTextureCache(MI.getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
  if (!isALUInstr(MI.getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                        E = MI.operands_end();
       I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

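/// \returns the index of the *_sel operand that is paired with the source
/// operand at index \p SrcIdx of \p Opcode, or -1 if there is none.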
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

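/// Collect the register source operands of \p MI, pairing each one with its
/// constant-buffer select or literal value (0 when the source is a plain
/// register).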
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr &MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        MachineOperand &Sel =
            MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }

    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI.getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}

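/// Reduce the sources of \p MI to (register index, channel) pairs for bank
/// swizzle checking. PS/PV-forwarded operands are encoded as index 255,
/// constant reads are counted in \p ConstCount, and the result is padded to
/// three entries.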
std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned>> Result;
  unsigned i = 0;
  for (const auto &Src : getSrcs(MI)) {
    ++i;
    unsigned Reg = Src.first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

static std::vector<std::pair<int, unsigned>>
Swizzle(std::vector<std::pair<int, unsigned>> Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned>> &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
            // The value from output queue A (denoted by register OQAP) can
            // only be fetched during the first cycle.
            return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence, assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible swizzle sequences to find one that meets all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in the Trans slot can't read a GPR at cycle 0 if they also
/// read a constant, and can't read a GPR at cycle 1 if they read two constants.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned>> &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

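/// \returns true if the instructions in \p IG can be issued as one instruction
/// group without exceeding the register read port limits. The bank swizzles
/// that make the group legal are returned in \p ValidSwizzle.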
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
        AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
        IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned>> TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
        TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

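/// \returns true if the constant reads in \p Consts address at most two
/// distinct constant pairs, which is the limit for a single instruction group.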
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr &MI = *MIs[i];
    if (!isALUInstr(MI.getOpcode()))
      continue;

    for (const auto &Src : getSrcs(MI)) {
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr &MI = *I;
    if (isPredicateSetter(MI.getOpcode()))
      return &MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
      Opcode == AMDGPU::BRANCH_COND_f32;
}

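/// Analyze the JUMP / JUMP_COND terminators of \p MBB, recording the target
/// blocks in \p TBB / \p FBB and the predicate-setter operands in \p Cond;
/// \returns true if the terminator sequence cannot be handled.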
bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(I->getOpcode())) {
    return false;
  }

  // Remove successive JUMP
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst.getOpcode();
  if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      auto predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr &SecondLastInst = *I;
  unsigned SecondLastOpc = SecondLastInst.getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    auto predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst.getOperand(0).getMBB();
    FBB = LastInst.getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
      It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return It.getReverse();
  }
  return MBB.end();
}

unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *TBB,
                                     MachineBasicBlock *FBB,
                                     ArrayRef<MachineOperand> Cond,
                                     const DebugLoc &DL,
                                     int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(*PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(*PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                     int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB contains more
    // than a single clause; we are unable to predicate several clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case AMDGPU::PRED_SETE_INT:
    MO.setImm(AMDGPU::PRED_SETNE_INT);
    break;
  case AMDGPU::PRED_SETNE_INT:
    MO.setImm(AMDGPU::PRED_SETE_INT);
    break;
  case AMDGPU::PRED_SETE:
    MO.setImm(AMDGPU::PRED_SETNE);
    break;
  case AMDGPU::PRED_SETNE:
    MO.setImm(AMDGPU::PRED_SETE);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

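/// Expand post-RA pseudos: indirect register loads and stores (the default
/// case) and the R600_EXTRACT_ELT / R600_INSERT_ELT pseudos are lowered to
/// MOVs or indirect read/write sequences.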
bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI.getParent();
    int OffsetOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::chan);
    if (isRegisterLoad(MI)) {
      int DstOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
                          OffsetReg);
      }
    } else if (isRegisterStore(MI)) {
      int ValOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::val);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI.getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MBB->erase(MI);
    return true;
  }
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
                      RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
                      MI.getOperand(2).getReg(),
                      RI.getHWRegChan(MI.getOperand(1).getReg()));
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI.getOperand(1).getReg()),   // Address
                       MI.getOperand(3).getReg(),                     // Offset
                       RI.getHWRegChan(MI.getOperand(1).getReg()));   // Channel
    break;
  }
  MI.eraseFromParent();
  return true;
}

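/// Mark every register that indirect addressing may touch in \p MF as
/// reserved, based on the function's stack width and indirect index range.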
Eugene Zelenko734bb7b2017-01-20 17:52:16 +00001084void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001085 const MachineFunction &MF) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001086 const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1087 const R600FrameLowering *TFL = ST.getFrameLowering();
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001088
1089 unsigned StackWidth = TFL->getStackWidth(MF);
1090 int End = getIndirectIndexEnd(MF);
1091
Tom Stellard81d871d2013-11-13 23:36:50 +00001092 if (End == -1)
1093 return;
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001094
1095 for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
1096 unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
Tom Stellard81d871d2013-11-13 23:36:50 +00001097 Reserved.set(SuperReg);
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001098 for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
1099 unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
Tom Stellard81d871d2013-11-13 23:36:50 +00001100 Reserved.set(Reg);
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001101 }
1102 }
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001103}
1104
Tom Stellard26a3b672013-10-22 18:19:10 +00001105const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
1106 return &AMDGPU::R600_TReg32_XRegClass;
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001107}
1108
1109MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
1110 MachineBasicBlock::iterator I,
1111 unsigned ValueReg, unsigned Address,
1112 unsigned OffsetReg) const {
Tom Stellard880a80a2014-06-17 16:53:14 +00001113 return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
1114}
1115
1116MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
1117 MachineBasicBlock::iterator I,
1118 unsigned ValueReg, unsigned Address,
1119 unsigned OffsetReg,
1120 unsigned AddrChan) const {
1121 unsigned AddrReg;
1122 switch (AddrChan) {
1123 default: llvm_unreachable("Invalid Channel");
1124 case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
1125 case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
1126 case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
1127 case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
1128 }
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001129 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
1130 AMDGPU::AR_X, OffsetReg);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001131 setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001132
1133 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
1134 AddrReg, ValueReg)
Tom Stellardaad53762013-06-05 03:43:06 +00001135 .addReg(AMDGPU::AR_X,
1136 RegState::Implicit | RegState::Kill);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001137 setImmOperand(*Mov, AMDGPU::OpName::dst_rel, 1);
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001138 return Mov;
1139}
1140
1141MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
1142 MachineBasicBlock::iterator I,
1143 unsigned ValueReg, unsigned Address,
1144 unsigned OffsetReg) const {
Tom Stellard880a80a2014-06-17 16:53:14 +00001145 return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
1146}
1147
1148MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
1149 MachineBasicBlock::iterator I,
1150 unsigned ValueReg, unsigned Address,
1151 unsigned OffsetReg,
1152 unsigned AddrChan) const {
1153 unsigned AddrReg;
1154 switch (AddrChan) {
1155 default: llvm_unreachable("Invalid Channel");
1156 case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
1157 case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
1158 case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
1159 case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
1160 }
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001161 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
1162 AMDGPU::AR_X,
1163 OffsetReg);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001164 setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001165 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
1166 ValueReg,
1167 AddrReg)
Tom Stellardaad53762013-06-05 03:43:06 +00001168 .addReg(AMDGPU::AR_X,
1169 RegState::Implicit | RegState::Kill);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001170 setImmOperand(*Mov, AMDGPU::OpName::src0_rel, 1);
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001171
1172 return Mov;
1173}
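// The matching read-side sketch, under the same assumptions as the write
// example above, for indirect address 1 on channel 0:
//   TII->buildIndirectRead(&MBB, I, ValueReg, /*Address=*/1, OffsetReg);
//   MOVA_INT_eg AR_X, OffsetReg              ; write flag cleared
//   MOV ValueReg, <R600_AddrRegClass reg 1>  ; src0_rel = 1, AR_X killed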
1174
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001175int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
1176 const MachineRegisterInfo &MRI = MF.getRegInfo();
Matthias Braun941a7052016-07-28 18:40:00 +00001177 const MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001178 int Offset = -1;
1179
Matthias Braun941a7052016-07-28 18:40:00 +00001180 if (MFI.getNumObjects() == 0) {
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001181 return -1;
1182 }
1183
1184 if (MRI.livein_empty()) {
1185 return 0;
1186 }
1187
1188 const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
Krzysztof Parzyszek72518ea2017-10-16 19:08:41 +00001189 for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
1190 unsigned Reg = LI.first;
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001191 if (TargetRegisterInfo::isVirtualRegister(Reg) ||
1192 !IndirectRC->contains(Reg))
1193 continue;
1194
1195 unsigned RegIndex;
1196 unsigned RegEnd;
1197 for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
1198 ++RegIndex) {
1199 if (IndirectRC->getRegister(RegIndex) == Reg)
1200 break;
1201 }
1202 Offset = std::max(Offset, (int)RegIndex);
1203 }
1204
1205 return Offset + 1;
1206}
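// A worked example of the loop above: if the live-in list contains the
// registers at positions 3 and 5 of the indirect address register class,
// Offset ends up as 5 and the function returns 6, the first index past the
// live-in address registers.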
1207
1208int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
1209 int Offset = 0;
Matthias Braun941a7052016-07-28 18:40:00 +00001210 const MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001211
1212 // Variable-sized objects are not supported.
Matthias Braun941a7052016-07-28 18:40:00 +00001213 if (MFI.hasVarSizedObjects()) {
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001214 return -1;
1215 }
1216
Matthias Braun941a7052016-07-28 18:40:00 +00001217 if (MFI.getNumObjects() == 0) {
Matt Arsenault52a4d9b2016-07-09 18:11:15 +00001218 return -1;
1219 }
1220
1221 const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1222 const R600FrameLowering *TFL = ST.getFrameLowering();
1223
1224 unsigned IgnoredFrameReg;
1225 Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);
1226
1227 return getIndirectIndexBegin(MF) + Offset;
1228}
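// Illustrative arithmetic for the return above: with a begin index of 2 and a
// frame offset of 7 reported by getFrameIndexReference for frame index -1,
// the function returns 2 + 7 == 9.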
1229
Vincent Lejeune80031d9f2013-04-03 16:49:34 +00001230unsigned R600InstrInfo::getMaxAlusPerClause() const {
1231 return 115;
1232}
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001233
Tom Stellard75aadc22012-12-11 21:25:42 +00001234MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
1235 MachineBasicBlock::iterator I,
1236 unsigned Opcode,
1237 unsigned DstReg,
1238 unsigned Src0Reg,
1239 unsigned Src1Reg) const {
1240 MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
1241 DstReg); // $dst
1242
1243 if (Src1Reg) {
1244 MIB.addImm(0) // $update_exec_mask
1245 .addImm(0); // $update_predicate
1246 }
1247 MIB.addImm(1) // $write
1248 .addImm(0) // $omod
1249 .addImm(0) // $dst_rel
1250 .addImm(0) // $dst_clamp
1251 .addReg(Src0Reg) // $src0
1252 .addImm(0) // $src0_neg
1253 .addImm(0) // $src0_rel
Tom Stellard365366f2013-01-23 02:09:06 +00001254 .addImm(0) // $src0_abs
1255 .addImm(-1); // $src0_sel
Tom Stellard75aadc22012-12-11 21:25:42 +00001256
1257 if (Src1Reg) {
1258 MIB.addReg(Src1Reg) // $src1
1259 .addImm(0) // $src1_neg
1260 .addImm(0) // $src1_rel
Tom Stellard365366f2013-01-23 02:09:06 +00001261 .addImm(0) // $src1_abs
1262 .addImm(-1); // $src1_sel
Tom Stellard75aadc22012-12-11 21:25:42 +00001263 }
1264
1265 // XXX: The r600g finalizer expects this to be 1; once we've moved the
1266 // scheduling to the backend, we can change the default to 0.
1267 MIB.addImm(1) // $last
1268 .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
Vincent Lejeune22c42482013-04-30 00:14:08 +00001269 .addImm(0) // $literal
1270 .addImm(0); // $bank_swizzle
Tom Stellard75aadc22012-12-11 21:25:42 +00001271
1272 return MIB;
1273}
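// For reference, the operand layout built above for a two-source instruction
// (such as the DOT4 variants used later in this file) is:
//   0:$dst  1:$update_exec_mask  2:$update_predicate  3:$write  4:$omod
//   5:$dst_rel  6:$dst_clamp  7:$src0  8-11:src0 neg/rel/abs/sel
//   12:$src1  13-16:src1 neg/rel/abs/sel  17:$last  18:$pred_sel
//   19:$literal  20:$bank_swizzle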
1274
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001275#define OPERAND_CASE(Label) \
1276 case Label: { \
Tom Stellard02661d92013-06-25 21:22:18 +00001277 static const unsigned Ops[] = \
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001278 { \
1279 Label##_X, \
1280 Label##_Y, \
1281 Label##_Z, \
1282 Label##_W \
1283 }; \
1284 return Ops[Slot]; \
1285 }
1286
Tom Stellard02661d92013-06-25 21:22:18 +00001287static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001288 switch (Op) {
Tom Stellard02661d92013-06-25 21:22:18 +00001289 OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
1290 OPERAND_CASE(AMDGPU::OpName::update_pred)
1291 OPERAND_CASE(AMDGPU::OpName::write)
1292 OPERAND_CASE(AMDGPU::OpName::omod)
1293 OPERAND_CASE(AMDGPU::OpName::dst_rel)
1294 OPERAND_CASE(AMDGPU::OpName::clamp)
1295 OPERAND_CASE(AMDGPU::OpName::src0)
1296 OPERAND_CASE(AMDGPU::OpName::src0_neg)
1297 OPERAND_CASE(AMDGPU::OpName::src0_rel)
1298 OPERAND_CASE(AMDGPU::OpName::src0_abs)
1299 OPERAND_CASE(AMDGPU::OpName::src0_sel)
1300 OPERAND_CASE(AMDGPU::OpName::src1)
1301 OPERAND_CASE(AMDGPU::OpName::src1_neg)
1302 OPERAND_CASE(AMDGPU::OpName::src1_rel)
1303 OPERAND_CASE(AMDGPU::OpName::src1_abs)
1304 OPERAND_CASE(AMDGPU::OpName::src1_sel)
1305 OPERAND_CASE(AMDGPU::OpName::pred_sel)
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001306 default:
1307 llvm_unreachable("Wrong Operand");
1308 }
1309}
1310
1311#undef OPERAND_CASE
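// A minimal sketch of what the macro above expands to: slot indices 0-3 map
// to the X, Y, Z and W channels, so for example
//   getSlotedOps(AMDGPU::OpName::src0, 2) == AMDGPU::OpName::src0_Z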
1312
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001313MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
1314 MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
1315 const {
1316 assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
1317 unsigned Opcode;
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001318 if (ST.getGeneration() <= R600Subtarget::R700)
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001319 Opcode = AMDGPU::DOT4_r600;
1320 else
1321 Opcode = AMDGPU::DOT4_eg;
1322 MachineBasicBlock::iterator I = MI;
1323 MachineOperand &Src0 = MI->getOperand(
Tom Stellard02661d92013-06-25 21:22:18 +00001324 getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001325 MachineOperand &Src1 = MI->getOperand(
Tom Stellard02661d92013-06-25 21:22:18 +00001326 getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001327 MachineInstr *MIB = buildDefaultInstruction(
1328 MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
Tom Stellard02661d92013-06-25 21:22:18 +00001329 static const unsigned Operands[14] = {
1330 AMDGPU::OpName::update_exec_mask,
1331 AMDGPU::OpName::update_pred,
1332 AMDGPU::OpName::write,
1333 AMDGPU::OpName::omod,
1334 AMDGPU::OpName::dst_rel,
1335 AMDGPU::OpName::clamp,
1336 AMDGPU::OpName::src0_neg,
1337 AMDGPU::OpName::src0_rel,
1338 AMDGPU::OpName::src0_abs,
1339 AMDGPU::OpName::src0_sel,
1340 AMDGPU::OpName::src1_neg,
1341 AMDGPU::OpName::src1_rel,
1342 AMDGPU::OpName::src1_abs,
1343 AMDGPU::OpName::src1_sel,
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001344 };
1345
Vincent Lejeune745d4292013-11-16 16:24:41 +00001346 MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
1347 getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
1348 MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
1349 .setReg(MO.getReg());
1350
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001351 for (unsigned i = 0; i < 14; i++) {
1352 MachineOperand &MO = MI->getOperand(
Tom Stellard02661d92013-06-25 21:22:18 +00001353 getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001354 assert(MO.isImm());
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001355 setImmOperand(*MIB, Operands[i], MO.getImm());
Vincent Lejeune519f21e2013-05-17 16:50:32 +00001356 }
1357 MIB->getOperand(20).setImm(0); // operand 20 is $bank_swizzle (see buildDefaultInstruction)
1358 return MIB;
1359}
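// A minimal usage sketch, assuming MI is a DOT_4 pseudo and DstReg has been
// chosen for the slot:
//   TII->buildSlotOfVectorInstruction(MBB, MI, /*Slot=*/1, DstReg);
// emits a DOT4_r600 or DOT4_eg (depending on the generation) whose sources
// and immediate modifiers are copied from the _Y operands of MI.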
1360
Tom Stellard75aadc22012-12-11 21:25:42 +00001361MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
1362 MachineBasicBlock::iterator I,
1363 unsigned DstReg,
1364 uint64_t Imm) const {
1365 MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
1366 AMDGPU::ALU_LITERAL_X);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001367 setImmOperand(*MovImm, AMDGPU::OpName::literal, Imm);
Tom Stellard75aadc22012-12-11 21:25:42 +00001368 return MovImm;
1369}
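// A minimal usage sketch, assuming DstReg is a 32-bit result register:
//   TII->buildMovImm(MBB, I, DstReg, 0x3f800000); // raw bits of 1.0f
// produces MOV DstReg, ALU_LITERAL_X with the $literal operand holding the
// immediate, so the value is read from the literal constant slot.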
1370
Tom Stellard26a3b672013-10-22 18:19:10 +00001371MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
1372 MachineBasicBlock::iterator I,
1373 unsigned DstReg, unsigned SrcReg) const {
1374 return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
1375}
1376
Tom Stellard02661d92013-06-25 21:22:18 +00001377int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
Tom Stellard75aadc22012-12-11 21:25:42 +00001378 return getOperandIdx(MI.getOpcode(), Op);
1379}
1380
Tom Stellard02661d92013-06-25 21:22:18 +00001381int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
1382 return AMDGPU::getNamedOperandIdx(Opcode, Op);
Vincent Lejeunec6896792013-06-04 23:17:15 +00001383}
1384
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001385void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
Tom Stellard75aadc22012-12-11 21:25:42 +00001386 int64_t Imm) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001387 int Idx = getOperandIdx(MI, Op);
Tom Stellard75aadc22012-12-11 21:25:42 +00001388 assert(Idx != -1 && "Operand not supported for this instruction.");
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001389 assert(MI.getOperand(Idx).isImm());
1390 MI.getOperand(Idx).setImm(Imm);
Tom Stellard75aadc22012-12-11 21:25:42 +00001391}
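// A minimal usage sketch, assuming MI already has a $write operand:
//   TII->setImmOperand(*MI, AMDGPU::OpName::write, 0);
// The named operand is located with getOperandIdx and its immediate is
// overwritten in place; the asserts above fire otherwise.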
1392
1393//===----------------------------------------------------------------------===//
1394// Instruction flag getters/setters
1395//===----------------------------------------------------------------------===//
1396
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001397MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
Tom Stellard75aadc22012-12-11 21:25:42 +00001398 unsigned Flag) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001399 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
Tom Stellard75aadc22012-12-11 21:25:42 +00001400 int FlagIndex = 0;
1401 if (Flag != 0) {
1402 // If we pass something other than the default value of Flag to this
1403 // function, it means we want to set a flag on an instruction
1404 // that uses native encoding.
1405 assert(HAS_NATIVE_OPERANDS(TargetFlags));
1406 bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
1407 switch (Flag) {
1408 case MO_FLAG_CLAMP:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001409 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::clamp);
Tom Stellard75aadc22012-12-11 21:25:42 +00001410 break;
1411 case MO_FLAG_MASK:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001412 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::write);
Tom Stellard75aadc22012-12-11 21:25:42 +00001413 break;
1414 case MO_FLAG_NOT_LAST:
1415 case MO_FLAG_LAST:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001416 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::last);
Tom Stellard75aadc22012-12-11 21:25:42 +00001417 break;
1418 case MO_FLAG_NEG:
1419 switch (SrcIdx) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001420 case 0:
1421 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_neg);
1422 break;
1423 case 1:
1424 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_neg);
1425 break;
1426 case 2:
1427 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src2_neg);
1428 break;
Tom Stellard75aadc22012-12-11 21:25:42 +00001429 }
1430 break;
1431
1432 case MO_FLAG_ABS:
1433 assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
1434 "instructions.");
Tom Stellard6975d352012-12-13 19:38:52 +00001435 (void)IsOP3;
Tom Stellard75aadc22012-12-11 21:25:42 +00001436 switch (SrcIdx) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001437 case 0:
1438 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_abs);
1439 break;
1440 case 1:
1441 FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_abs);
1442 break;
Tom Stellard75aadc22012-12-11 21:25:42 +00001443 }
1444 break;
1445
1446 default:
1447 FlagIndex = -1;
1448 break;
1449 }
1450 assert(FlagIndex != -1 && "Flag not supported for this instruction");
1451 } else {
1452 FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
1453 assert(FlagIndex != 0 &&
1454 "Instruction flags not supported for this instruction");
1455 }
1456
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001457 MachineOperand &FlagOp = MI.getOperand(FlagIndex);
Tom Stellard75aadc22012-12-11 21:25:42 +00001458 assert(FlagOp.isImm());
1459 return FlagOp;
1460}
1461
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001462void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
Tom Stellard75aadc22012-12-11 21:25:42 +00001463 unsigned Flag) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001464 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
Tom Stellard75aadc22012-12-11 21:25:42 +00001465 if (Flag == 0) {
1466 return;
1467 }
1468 if (HAS_NATIVE_OPERANDS(TargetFlags)) {
1469 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
1470 if (Flag == MO_FLAG_NOT_LAST) {
1471 clearFlag(MI, Operand, MO_FLAG_LAST);
1472 } else if (Flag == MO_FLAG_MASK) {
1473 clearFlag(MI, Operand, Flag);
1474 } else {
1475 FlagOp.setImm(1);
1476 }
1477 } else {
1478 MachineOperand &FlagOp = getFlagOp(MI, Operand);
1479 FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
1480 }
1481}
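// A minimal sketch of both encodings handled above, assuming MI is an ALU
// instruction:
//   TII->addFlag(MI, /*Operand=*/1, MO_FLAG_NEG);
// With native operands this resolves to the $src1_neg operand via getFlagOp
// and sets it to 1; with the legacy encoding the flag bits are OR'ed into the
// shared flag operand, shifted by NUM_MO_FLAGS * Operand.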
1482
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001483void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
Tom Stellard75aadc22012-12-11 21:25:42 +00001484 unsigned Flag) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001485 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
Tom Stellard75aadc22012-12-11 21:25:42 +00001486 if (HAS_NATIVE_OPERANDS(TargetFlags)) {
1487 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
1488 FlagOp.setImm(0);
1489 } else {
1490 MachineOperand &FlagOp = getFlagOp(MI);
1491 unsigned InstFlags = FlagOp.getImm();
1492 InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
1493 FlagOp.setImm(InstFlags);
1494 }
1495}
Yaxun Liu920cc2f2017-11-10 01:53:24 +00001496
1497unsigned R600InstrInfo::getAddressSpaceForPseudoSourceKind(
1498 PseudoSourceValue::PSVKind Kind) const {
1499 switch (Kind) {
1500 case PseudoSourceValue::Stack:
1501 case PseudoSourceValue::FixedStack:
1502 return AMDGPUASI.PRIVATE_ADDRESS;
1503 case PseudoSourceValue::ConstantPool:
1504 case PseudoSourceValue::GOT:
1505 case PseudoSourceValue::JumpTable:
1506 case PseudoSourceValue::GlobalValueCallEntry:
1507 case PseudoSourceValue::ExternalSymbolCallEntry:
1508 case PseudoSourceValue::TargetCustom:
1509 return AMDGPUASI.CONSTANT_ADDRESS;
1510 }
1511 llvm_unreachable("Invalid pseudo source kind");
1512 return AMDGPUASI.PRIVATE_ADDRESS;
1513}
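// A brief example of the mapping above: a spill-slot access through a
// FixedStack pseudo source value is classified as PRIVATE_ADDRESS, while
// constant-pool and jump-table accesses are classified as CONSTANT_ADDRESS.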