//===-- R600ExpandSpecialInstrs.cpp - Expand special instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Vector, Reduction, and Cube instructions need to fill the entire instruction
/// group to work correctly. This pass expands these individual instructions
/// into several instructions that will completely fill the instruction group.
//
//===----------------------------------------------------------------------===//
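
// Background note: on the R600-family VLIW ALU, an instruction group has one
// slot per channel (x, y, z, w) plus a transcendental slot. The pseudo
// instructions handled here are expanded into one real instruction per channel
// slot, with the write masked on slots whose result is not needed.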

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace {

class R600ExpandSpecialInstrsPass : public MachineFunctionPass {

private:
  static char ID;
  const R600InstrInfo *TII;

  void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI,
                      unsigned Op);

public:
  R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
    TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "R600 Expand special instructions pass";
  }
};

} // End anonymous namespace

char R600ExpandSpecialInstrsPass::ID = 0;

FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
  return new R600ExpandSpecialInstrsPass(TM);
}

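// Copy the immediate value of operand Op from OldMI to NewMI, if OldMI
// actually has such an operand.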
void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
    const MachineInstr *OldMI, unsigned Op) {
  int OpIdx = TII->getOperandIdx(*OldMI, Op);
  if (OpIdx > -1) {
    uint64_t Val = OldMI->getOperand(OpIdx).getImm();
    TII->setImmOperand(NewMI, Op, Val);
  }
}

bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const R600InstrInfo *>(
      MF.getTarget().getSubtargetImpl()->getInstrInfo());

  const R600RegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                  BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      MachineInstr &MI = *I;
      I = std::next(I);

      // Expand LDS_*_RET instructions
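      // LDS *_RET instructions return their result through the OQAP register,
      // so rewrite the instruction to define OQAP and insert a MOV that copies
      // OQAP into the original destination register.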
      if (TII->isLDSRetInstr(MI.getOpcode())) {
        int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        assert(DstIdx != -1);
        MachineOperand &DstOp = MI.getOperand(DstIdx);
        MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
                                               DstOp.getReg(), AMDGPU::OQAP);
        DstOp.setReg(AMDGPU::OQAP);
        int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
                                               AMDGPU::OpName::pred_sel);
        int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
                                               AMDGPU::OpName::pred_sel);
        // Copy the pred_sel operand.
        Mov->getOperand(MovPredSelIdx).setReg(
            MI.getOperand(LDSPredSelIdx).getReg());
      }

      switch (MI.getOpcode()) {
      default: break;
      // Expand PRED_X to one of the PRED_SET instructions.
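      // Operand 3 of PRED_X carries flag bits: MO_FLAG_PUSH selects whether
      // the expanded compare updates the exec mask or only the predicate.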
      case AMDGPU::PRED_X: {
        uint64_t Flags = MI.getOperand(3).getImm();
        // The native opcode used by PRED_X is stored as an immediate in the
        // third operand.
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                            MI.getOperand(2).getImm(), // opcode
                                            MI.getOperand(0).getReg(), // dst
                                            MI.getOperand(1).getReg(), // src0
                                            AMDGPU::ZERO);             // src1
        TII->addFlag(PredSet, 0, MO_FLAG_MASK);
        if (Flags & MO_FLAG_PUSH) {
          TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
        } else {
          TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
        }
        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_PAIR_XY: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

          if (Chan < 2)
            DstReg = MI.getOperand(Chan).getReg();
          else
            DstReg = Chan == 2 ? AMDGPU::T0_Z : AMDGPU::T0_W;

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_XY,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan >= 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_PAIR_ZW: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

          if (Chan < 2)
            DstReg = Chan == 0 ? AMDGPU::T0_X : AMDGPU::T0_Y;
          else
            DstReg = MI.getOperand(Chan-2).getReg();

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_ZW,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan < 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_VEC_LOAD: {
        const R600RegisterInfo &TRI = TII->getRegisterInfo();
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(1).getImm());
        unsigned DstReg = MI.getOperand(0).getReg();

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_LOAD_P0,
              TRI.getSubReg(DstReg, TRI.getSubRegFromChannel(Chan)), PReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }
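      // DOT_4 must occupy all four channel slots: expand it into one slot
      // instruction per channel and write-mask every slot except the one
      // matching the destination channel (the hardware reduces the per-slot
      // products into that single result).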
      case AMDGPU::DOT_4: {

        const R600RegisterInfo &TRI = TII->getRegisterInfo();

        unsigned DstReg = MI.getOperand(0).getReg();
        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned SubDstReg =
              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
          MachineInstr *BMI =
              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Mask) {
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
          unsigned Opcode = BMI->getOpcode();
          // While not strictly necessary from a hardware point of view, we
          // force all src operands of a dot4 instruction to belong to the
          // same slot.
          unsigned Src0 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
              .getReg();
          unsigned Src1 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
              .getReg();
          (void) Src0;
          (void) Src1;
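          // The same-channel check below only applies to plain GPR sources;
          // higher encodings are constants or other special operands with no
          // fixed channel. The (void) casts above keep Src0/Src1 used when
          // asserts are compiled out.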
          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
              (TRI.getEncodingValue(Src1) & 0xff) < 127)
            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
        }
        MI.eraseFromParent();
        continue;
      }
      }

      bool IsReduction = TII->isReductionOp(MI.getOpcode());
      bool IsVector = TII->isVector(MI);
      bool IsCube = TII->isCubeOp(MI.getOpcode());
      if (!IsReduction && !IsVector && !IsCube) {
        continue;
      }

      // Expand the instruction
      //
      // Reduction instructions:
      //   T0_X = DP4 T1_XYZW, T2_XYZW
      // becomes:
      //   T0_X = DP4 T1_X, T2_X
      //   T0_Y (write masked) = DP4 T1_Y, T2_Y
      //   T0_Z (write masked) = DP4 T1_Z, T2_Z
      //   T0_W (write masked) = DP4 T1_W, T2_W
      //
      // Vector instructions:
      //   T0_X = MULLO_INT T1_X, T2_X
      // becomes:
      //   T0_X = MULLO_INT T1_X, T2_X
      //   T0_Y (write masked) = MULLO_INT T1_X, T2_X
      //   T0_Z (write masked) = MULLO_INT T1_X, T2_X
      //   T0_W (write masked) = MULLO_INT T1_X, T2_X
      //
      // Cube instructions:
      //   T0_XYZW = CUBE T1_XYZW
      // becomes:
      //   T0_X = CUBE T1_Z, T1_Y
      //   T0_Y = CUBE T1_Z, T1_X
      //   T0_Z = CUBE T1_X, T1_Z
      //   T0_W = CUBE T1_Y, T1_Z
      for (unsigned Chan = 0; Chan < 4; Chan++) {
        unsigned DstReg = MI.getOperand(
            TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
        unsigned Src0 = MI.getOperand(
            TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
        unsigned Src1 = 0;

        // Determine the correct source registers.
        if (!IsCube) {
          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
          if (Src1Idx != -1) {
            Src1 = MI.getOperand(Src1Idx).getReg();
          }
        }
        if (IsReduction) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          Src0 = TRI.getSubReg(Src0, SubRegIndex);
          Src1 = TRI.getSubReg(Src1, SubRegIndex);
        } else if (IsCube) {
          static const int CubeSrcSwz[] = {2, 2, 0, 1};
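          // CubeSrcSwz gives the per-slot source swizzle for CUBE, matching
          // the expansion listed above: slot Chan reads channel
          // CubeSrcSwz[Chan] as src0 and channel CubeSrcSwz[3 - Chan] as src1,
          // i.e. (Z,Y), (Z,X), (X,Z), (Y,Z) for channels 0..3.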
          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
        }

        // Determine the correct destination registers.
        bool Mask = false;
        bool NotLast = true;
        if (IsCube) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
        } else {
          // Mask the write if the original instruction does not write to
          // the current channel.
          Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
        }

        // Only the last slot in the group (channel 3) omits the NOT_LAST flag.
        NotLast = (Chan != 3);

        // Add the new instruction
        unsigned Opcode = MI.getOpcode();
        switch (Opcode) {
        case AMDGPU::CUBE_r600_pseudo:
          Opcode = AMDGPU::CUBE_r600_real;
          break;
        case AMDGPU::CUBE_eg_pseudo:
          Opcode = AMDGPU::CUBE_eg_real;
          break;
        default:
          break;
        }

        MachineInstr *NewMI =
            TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);

        if (Chan != 0)
          NewMI->bundleWithPred();
        if (Mask) {
          TII->addFlag(NewMI, 0, MO_FLAG_MASK);
        }
        if (NotLast) {
          TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
        }
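        // Carry the per-operand modifiers from the original pseudo over to
        // this expanded slot.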
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_abs);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_abs);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_neg);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_neg);
      }
      MI.eraseFromParent();
    }
  }
  return false;
}