//===-- R600ExpandSpecialInstrs.cpp - Expand special instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Vector, Reduction, and Cube instructions need to fill the entire instruction
/// group to work correctly. This pass expands these individual instructions
/// into several instructions that will completely fill the instruction group.
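/// For example, a DP4 reduction that writes a single channel is rewritten
/// into four DP4 slot instructions, with the write masked on every channel
/// the original instruction did not define.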
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace {

class R600ExpandSpecialInstrsPass : public MachineFunctionPass {

private:
  static char ID;
  const R600InstrInfo *TII;

  bool ExpandInputPerspective(MachineInstr &MI);
  bool ExpandInputConstant(MachineInstr &MI);

public:
  R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
    TII(0) { }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  const char *getPassName() const {
    return "R600 Expand special instructions pass";
  }
};

} // End anonymous namespace

char R600ExpandSpecialInstrsPass::ID = 0;

FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
  return new R600ExpandSpecialInstrsPass(TM);
}

bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());

  const R600RegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                  BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      MachineInstr &MI = *I;
      I = llvm::next(I);

      // Expand LDS_*_RET instructions
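      // An LDS_*_RET instruction returns its result through the OQAP
      // register, so retarget the instruction's destination at OQAP and
      // insert a MOV that copies OQAP into the original destination.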
      if (TII->isLDSRetInstr(MI.getOpcode())) {
        int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        assert(DstIdx != -1);
        MachineOperand &DstOp = MI.getOperand(DstIdx);
        MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
                                               DstOp.getReg(), AMDGPU::OQAP);
        DstOp.setReg(AMDGPU::OQAP);
        int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
                                               AMDGPU::OpName::pred_sel);
        int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
                                               AMDGPU::OpName::pred_sel);
        // Copy the pred_sel bit
        Mov->getOperand(MovPredSelIdx).setReg(
            MI.getOperand(LDSPredSelIdx).getReg());
      }

      switch (MI.getOpcode()) {
      default: break;
      // Expand PRED_X to one of the PRED_SET instructions.
      case AMDGPU::PRED_X: {
        uint64_t Flags = MI.getOperand(3).getImm();
        // The native opcode used by PRED_X is stored as an immediate in the
        // third operand.
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                            MI.getOperand(2).getImm(), // opcode
                                            MI.getOperand(0).getReg(), // dst
                                            MI.getOperand(1).getReg(), // src0
                                            AMDGPU::ZERO);             // src1
        TII->addFlag(PredSet, 0, MO_FLAG_MASK);
        if (Flags & MO_FLAG_PUSH) {
          TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
        } else {
          TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
        }
        MI.eraseFromParent();
        continue;
      }

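      // INTERP_PAIR_XY produces the X and Y channels of an interpolation.
      // Fill all four slots of the instruction group with INTERP_XY: the Z
      // and W slots write to dummy T0 registers and are write masked.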
      case AMDGPU::INTERP_PAIR_XY: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

          if (Chan < 2)
            DstReg = MI.getOperand(Chan).getReg();
          else
            DstReg = Chan == 2 ? AMDGPU::T0_Z : AMDGPU::T0_W;

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_XY,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan >= 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

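      // INTERP_PAIR_ZW is the Z/W counterpart: here the X and Y slots are
      // filled with write-masked INTERP_ZW instructions targeting dummy T0
      // registers.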
      case AMDGPU::INTERP_PAIR_ZW: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

          if (Chan < 2)
            DstReg = Chan == 0 ? AMDGPU::T0_X : AMDGPU::T0_Y;
          else
            DstReg = MI.getOperand(Chan-2).getReg();

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_ZW,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan < 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

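      // INTERP_VEC_LOAD loads all four channels of the parameter vector:
      // emit one INTERP_LOAD_P0 per channel, each writing the matching
      // subregister of the destination.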
      case AMDGPU::INTERP_VEC_LOAD: {
        const R600RegisterInfo &TRI = TII->getRegisterInfo();
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(1).getImm());
        unsigned DstReg = MI.getOperand(0).getReg();

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_LOAD_P0,
              TRI.getSubReg(DstReg, TRI.getSubRegFromChannel(Chan)), PReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }
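      // DOT_4 is expanded into four per-slot dot-product instructions. Only
      // the slot matching the destination channel keeps its write enabled;
      // the other three are write masked.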
      case AMDGPU::DOT_4: {

        const R600RegisterInfo &TRI = TII->getRegisterInfo();

        unsigned DstReg = MI.getOperand(0).getReg();
        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned SubDstReg =
              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
          MachineInstr *BMI =
              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Mask) {
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
          unsigned Opcode = BMI->getOpcode();
          // While not strictly necessary from hw point of view, we force
          // all src operands of a dot4 inst to belong to the same slot.
          unsigned Src0 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
              .getReg();
          unsigned Src1 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
              .getReg();
          (void) Src0;
          (void) Src1;
          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
              (TRI.getEncodingValue(Src1) & 0xff) < 127)
            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
        }
        MI.eraseFromParent();
        continue;
      }
      }

      bool IsReduction = TII->isReductionOp(MI.getOpcode());
      bool IsVector = TII->isVector(MI);
      bool IsCube = TII->isCubeOp(MI.getOpcode());
      if (!IsReduction && !IsVector && !IsCube) {
        continue;
      }

      // Expand the instruction
      //
      // Reduction instructions:
      //   T0_X = DP4 T1_XYZW, T2_XYZW
      // becomes:
      //   T0_X = DP4 T1_X, T2_X
      //   T0_Y (write masked) = DP4 T1_Y, T2_Y
      //   T0_Z (write masked) = DP4 T1_Z, T2_Z
      //   T0_W (write masked) = DP4 T1_W, T2_W
      //
      // Vector instructions:
      //   T0_X = MULLO_INT T1_X, T2_X
      // becomes:
      //   T0_X = MULLO_INT T1_X, T2_X
      //   T0_Y (write masked) = MULLO_INT T1_X, T2_X
      //   T0_Z (write masked) = MULLO_INT T1_X, T2_X
      //   T0_W (write masked) = MULLO_INT T1_X, T2_X
      //
      // Cube instructions:
      //   T0_XYZW = CUBE T1_XYZW
      // becomes:
      //   T0_X = CUBE T1_Z, T1_Y
      //   T0_Y = CUBE T1_Z, T1_X
      //   T0_Z = CUBE T1_X, T1_Z
      //   T0_W = CUBE T1_Y, T1_Z
      for (unsigned Chan = 0; Chan < 4; Chan++) {
        unsigned DstReg = MI.getOperand(
                            TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
        unsigned Src0 = MI.getOperand(
                            TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
        unsigned Src1 = 0;

        // Determine the correct source registers
        if (!IsCube) {
          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
          if (Src1Idx != -1) {
            Src1 = MI.getOperand(Src1Idx).getReg();
          }
        }
        if (IsReduction) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          Src0 = TRI.getSubReg(Src0, SubRegIndex);
          Src1 = TRI.getSubReg(Src1, SubRegIndex);
        } else if (IsCube) {
          static const int CubeSrcSwz[] = {2, 2, 0, 1};
          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
        }

        // Determine the correct destination register.
        bool Mask = false;
        bool NotLast = true;
        if (IsCube) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
        } else {
          // Mask the write if the original instruction does not write to
          // the current Channel.
          Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
        }

        // Set the NotLast flag: only channel 3 closes the instruction group.
        NotLast = (Chan != 3);

        // Add the new instruction
        unsigned Opcode = MI.getOpcode();
        switch (Opcode) {
        case AMDGPU::CUBE_r600_pseudo:
          Opcode = AMDGPU::CUBE_r600_real;
          break;
        case AMDGPU::CUBE_eg_pseudo:
          Opcode = AMDGPU::CUBE_eg_real;
          break;
        default:
          break;
        }

        MachineInstr *NewMI =
          TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);

        if (Chan != 0)
          NewMI->bundleWithPred();
        if (Mask) {
          TII->addFlag(NewMI, 0, MO_FLAG_MASK);
        }
        if (NotLast) {
          TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
        }
      }
      MI.eraseFromParent();
    }
  }
  return false;
}