//===-- R600ExpandSpecialInstrs.cpp - Expand special instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Vector, Reduction, and Cube instructions need to fill the entire instruction
/// group to work correctly. This pass expands these individual instructions
/// into several instructions that will completely fill the instruction group.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace {

class R600ExpandSpecialInstrsPass : public MachineFunctionPass {
private:
  static char ID;
  const R600InstrInfo *TII;

  void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI,
                      unsigned Op);

public:
  R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
    TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "R600 Expand special instructions pass";
  }
};

} // End anonymous namespace

char R600ExpandSpecialInstrsPass::ID = 0;

FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
  return new R600ExpandSpecialInstrsPass(TM);
}

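// Copy the immediate value of operand Op (clamp, literal, abs/neg flags, etc.)
// from OldMI onto NewMI, if OldMI actually carries that operand.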
void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
    const MachineInstr *OldMI, unsigned Op) {
  int OpIdx = TII->getOperandIdx(*OldMI, Op);
  if (OpIdx > -1) {
    uint64_t Val = OldMI->getOperand(OpIdx).getImm();
    TII->setImmOperand(*NewMI, Op, Val);
  }
}

bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  TII = ST.getInstrInfo();

  const R600RegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                  BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      MachineInstr &MI = *I;
      I = std::next(I);

      // Expand LDS_*_RET instructions
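      // LDS reads return their value through the OQAP register, so rewrite
      // the instruction to define OQAP and insert a MOV that copies OQAP
      // into the original destination register.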
      if (TII->isLDSRetInstr(MI.getOpcode())) {
        int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        assert(DstIdx != -1);
        MachineOperand &DstOp = MI.getOperand(DstIdx);
        MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
                                               DstOp.getReg(), AMDGPU::OQAP);
        DstOp.setReg(AMDGPU::OQAP);
        int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
            AMDGPU::OpName::pred_sel);
        int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
            AMDGPU::OpName::pred_sel);
        // Copy the pred_sel bit
        Mov->getOperand(MovPredSelIdx).setReg(
            MI.getOperand(LDSPredSelIdx).getReg());
      }

      switch (MI.getOpcode()) {
      default: break;
      // Expand PRED_X to one of the PRED_SET instructions.
      case AMDGPU::PRED_X: {
        uint64_t Flags = MI.getOperand(3).getImm();
        // The native opcode used by PRED_X is stored as an immediate in the
        // third operand.
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                            MI.getOperand(2).getImm(), // opcode
                                            MI.getOperand(0).getReg(), // dst
                                            MI.getOperand(1).getReg(), // src0
                                            AMDGPU::ZERO);             // src1
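        // Only the predicate update of the PRED_SET is needed here, so mask
        // off the write of its destination register.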
        TII->addFlag(*PredSet, 0, MO_FLAG_MASK);
        if (Flags & MO_FLAG_PUSH) {
          TII->setImmOperand(*PredSet, AMDGPU::OpName::update_exec_mask, 1);
        } else {
          TII->setImmOperand(*PredSet, AMDGPU::OpName::update_pred, 1);
        }
        MI.eraseFromParent();
        continue;
        }
      case AMDGPU::DOT_4: {

        const R600RegisterInfo &TRI = TII->getRegisterInfo();

        unsigned DstReg = MI.getOperand(0).getReg();
        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;

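        // Emit one slot instruction per channel; only the slot that matches
        // the destination's channel keeps its write enabled, the other three
        // slots are write masked.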
        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned SubDstReg =
              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
          MachineInstr *BMI =
              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Mask) {
            TII->addFlag(*BMI, 0, MO_FLAG_MASK);
          }
          if (Chan != 3)
            TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
          unsigned Opcode = BMI->getOpcode();
          // While not strictly necessary from a hardware point of view, we
          // force all src operands of a DOT4 instruction to belong to the
          // same slot.
          unsigned Src0 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
              .getReg();
          unsigned Src1 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
              .getReg();
          (void) Src0;
          (void) Src1;
          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
              (TRI.getEncodingValue(Src1) & 0xff) < 127)
            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
        }
        MI.eraseFromParent();
        continue;
        }
      }

      bool IsReduction = TII->isReductionOp(MI.getOpcode());
      bool IsVector = TII->isVector(MI);
      bool IsCube = TII->isCubeOp(MI.getOpcode());
      if (!IsReduction && !IsVector && !IsCube) {
        continue;
      }

      // Expand the instruction
      //
      // Reduction instructions:
      //   T0_X = DP4 T1_XYZW, T2_XYZW
      // becomes:
      //   T0_X = DP4 T1_X, T2_X
      //   T0_Y (write masked) = DP4 T1_Y, T2_Y
      //   T0_Z (write masked) = DP4 T1_Z, T2_Z
      //   T0_W (write masked) = DP4 T1_W, T2_W
      //
      // Vector instructions:
      //   T0_X = MULLO_INT T1_X, T2_X
      // becomes:
      //   T0_X = MULLO_INT T1_X, T2_X
      //   T0_Y (write masked) = MULLO_INT T1_X, T2_X
      //   T0_Z (write masked) = MULLO_INT T1_X, T2_X
      //   T0_W (write masked) = MULLO_INT T1_X, T2_X
      //
      // Cube instructions:
      //   T0_XYZW = CUBE T1_XYZW
      // becomes:
      //   T0_X = CUBE T1_Z, T1_Y
      //   T0_Y = CUBE T1_Z, T1_X
      //   T0_Z = CUBE T1_X, T1_Z
      //   T0_W = CUBE T1_Y, T1_Z
      for (unsigned Chan = 0; Chan < 4; Chan++) {
        unsigned DstReg = MI.getOperand(
                          TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
        unsigned Src0 = MI.getOperand(
                        TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
        unsigned Src1 = 0;

        // Determine the correct source registers
        if (!IsCube) {
          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
          if (Src1Idx != -1) {
            Src1 = MI.getOperand(Src1Idx).getReg();
          }
        }
        if (IsReduction) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          Src0 = TRI.getSubReg(Src0, SubRegIndex);
          Src1 = TRI.getSubReg(Src1, SubRegIndex);
        } else if (IsCube) {
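          // Per-slot source swizzle for CUBE: the X..W slots read the source
          // channel pairs (Z,Y), (Z,X), (X,Z) and (Y,Z), matching the
          // expansion shown above.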
          static const int CubeSrcSwz[] = {2, 2, 0, 1};
          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
        }

        // Determine the correct destination registers.
        bool Mask = false;
        bool NotLast = true;
        if (IsCube) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
        } else {
          // Mask the write if the original instruction does not write to
          // the current Channel.
          Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
        }

        // Set the IsLast bit
        NotLast = (Chan != 3);

        // Add the new instruction
        unsigned Opcode = MI.getOpcode();
        switch (Opcode) {
        case AMDGPU::CUBE_r600_pseudo:
          Opcode = AMDGPU::CUBE_r600_real;
          break;
        case AMDGPU::CUBE_eg_pseudo:
          Opcode = AMDGPU::CUBE_eg_real;
          break;
        default:
          break;
        }

        MachineInstr *NewMI =
          TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);

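        // Bundle each new slot with the one emitted before it so the four
        // expanded instructions stay together as a single instruction group.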
        if (Chan != 0)
          NewMI->bundleWithPred();
        if (Mask) {
          TII->addFlag(*NewMI, 0, MO_FLAG_MASK);
        }
        if (NotLast) {
          TII->addFlag(*NewMI, 0, MO_FLAG_NOT_LAST);
        }
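        // Carry the per-operand modifiers of the original instruction over to
        // the new slot.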
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_abs);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_abs);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_neg);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_neg);
      }
      MI.eraseFromParent();
    }
  }
  return false;
}