//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

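/// \file
/// This pass folds the copy-exec / logical-op / copy-back-to-exec sequences
/// produced by SI control flow lowering into the fused s_<op>_saveexec_b64
/// instructions, and turns the *_term terminator pseudos back into their
/// ordinary forms.
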
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking"

namespace {

class SIOptimizeExecMasking : public MachineFunctionPass {
public:
  static char ID;

public:
  SIOptimizeExecMasking() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMasking::ID = 0;

char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;

/// If \p MI is a copy from exec, return the register copied to.
static unsigned isCopyFromExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a copy to exec, return the register copied from.
static unsigned isCopyToExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64: {
    const MachineOperand &Dst = MI.getOperand(0);
    if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC)
      return MI.getOperand(1).getReg();
    break;
  }
  case AMDGPU::S_MOV_B64_term:
    llvm_unreachable("should have been replaced");
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a logical operation with exec as one of its source operands,
/// return its destination register.
static unsigned isLogicalOpOnExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_AND_B64:
  case AMDGPU::S_OR_B64:
  case AMDGPU::S_XOR_B64:
  case AMDGPU::S_ANDN2_B64:
  case AMDGPU::S_ORN2_B64:
  case AMDGPU::S_NAND_B64:
  case AMDGPU::S_NOR_B64:
  case AMDGPU::S_XNOR_B64: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

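// Map an S_<op>_B64 opcode to the corresponding S_<op>_SAVEEXEC_B64 opcode,
// or INSTRUCTION_LIST_END if there is no saveexec form.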
static unsigned getSaveExecOp(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::S_AND_B64:
    return AMDGPU::S_AND_SAVEEXEC_B64;
  case AMDGPU::S_OR_B64:
    return AMDGPU::S_OR_SAVEEXEC_B64;
  case AMDGPU::S_XOR_B64:
    return AMDGPU::S_XOR_SAVEEXEC_B64;
  case AMDGPU::S_ANDN2_B64:
    return AMDGPU::S_ANDN2_SAVEEXEC_B64;
  case AMDGPU::S_ORN2_B64:
    return AMDGPU::S_ORN2_SAVEEXEC_B64;
  case AMDGPU::S_NAND_B64:
    return AMDGPU::S_NAND_SAVEEXEC_B64;
  case AMDGPU::S_NOR_B64:
    return AMDGPU::S_NOR_SAVEEXEC_B64;
  case AMDGPU::S_XNOR_B64:
    return AMDGPU::S_XNOR_SAVEEXEC_B64;
  default:
    return AMDGPU::INSTRUCTION_LIST_END;
  }
}

// These are only terminators to get correct spill code placement during
// register allocation, so turn them back into normal instructions. Only one of
// these is expected per block.
static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_MOV_B64_term: {
    MI.setDesc(TII.get(AMDGPU::COPY));
    return true;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
    return true;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
    return true;
  }
  default:
    return false;
  }
}

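// Rewrite the block's *_term terminator pseudos back to their non-terminator
// equivalents and return where the backwards scan should continue: the first
// non-terminator seen from the end, the rewritten terminator, or MBB.rend()
// if neither is found.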
static MachineBasicBlock::reverse_iterator fixTerminators(
  const SIInstrInfo &TII,
  MachineBasicBlock &MBB) {
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
  for (; I != E; ++I) {
    if (!I->isTerminator())
      return I;

    if (removeTerminatorBit(TII, *I))
      return I;
  }

  return E;
}

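// Scan backwards from \p I for at most InstLimit instructions, looking for a
// copy from exec; return it if found, otherwise MBB.rend().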
static MachineBasicBlock::reverse_iterator findExecCopy(
  const SIInstrInfo &TII,
  MachineBasicBlock &MBB,
  MachineBasicBlock::reverse_iterator I,
  unsigned CopyToExec) {
  const unsigned InstLimit = 25;

  auto E = MBB.rend();
  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
    unsigned CopyFromExec = isCopyFromExec(*I);
    if (CopyFromExec != AMDGPU::NoRegister)
      return I;
  }

  return E;
}

// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
// report the register as unavailable because a super-register with a lane mask
// is unavailable.
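// Return true if \p Reg is marked live-in in any successor of \p MBB.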
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
  for (MachineBasicBlock *Succ : MBB.successors()) {
    if (Succ->isLiveIn(Reg))
      return true;
  }

  return false;
}

bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  // Optimize sequences emitted for control flow lowering. They are originally
  // emitted as separate operations because spill code may need to be
  // inserted for the saved copy of exec.
  //
  //   x = copy exec
  //   z = s_<op>_b64 x, y
  //   exec = copy z
  // =>
  //   x = s_<op>_saveexec_b64 y
  //
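  // For s_and_b64, for instance, this works because s_and_saveexec_b64 x, y
  // both saves the old exec into x and performs exec = y & exec, which is
  // exactly what the three-instruction sequence computed.
  //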

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
    MachineBasicBlock::reverse_iterator E = MBB.rend();
    if (I == E)
      continue;

    unsigned CopyToExec = isCopyToExec(*I);
    if (CopyToExec == AMDGPU::NoRegister)
      continue;

    // Scan backwards to find the def.
    auto CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(AMDGPU::EXEC);

        DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    unsigned CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

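    // Walk forward from the copy-from-exec to the copy-to-exec. Find the one
    // logical op that reads the saved exec value and can become a saveexec
    // instruction, and collect any other readers of its result so they can
    // be rewritten to read exec directly. Give up if exec is read again after
    // that op or if the copied value is defined more than once.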
    for (MachineBasicBlock::iterator J
           = std::next(CopyFromExecInst->getIterator()), JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) {
        DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      if (J->modifiesRegister(CopyToExec, TRI)) {
        if (SaveExecInst) {
          DEBUG(dbgs() << "Multiple instructions modify "
                << PrintReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (J->readsRegister(CopyFromExec, TRI)) {
          SaveExecInst = &*J;
          DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        } else {
          DEBUG(dbgs() << "Instruction does not read exec copy: " << *J << '\n');
          break;
        }
      }

      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      if (!SaveExecInst->isCommutable())
        break;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

    CopyFromExecInst->eraseFromParent();

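    // Replace the logical op with its fused saveexec form: it writes the old
    // exec value into CopyFromExec and applies the operation to exec itself,
    // so the surrounding copies become redundant.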
    auto InsPt = SaveExecInst->getIterator();
    const DebugLoc &DL = SaveExecInst->getDebugLoc();

    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
            CopyFromExec)
      .addReg(OtherOp->getReg());
    SaveExecInst->eraseFromParent();

    CopyToExecInst->eraseFromParent();

    for (MachineInstr *OtherInst : OtherUseInsts) {
      OtherInst->substituteRegister(CopyToExec, AMDGPU::EXEC,
                                    AMDGPU::NoSubRegister, *TRI);
    }
  }

  return true;
}