//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with close-by immediate offsets.
// This will fuse operations such as
//   ds_read_b32 v0, v2 offset:16
//   ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
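// (read2 offsets are encoded in units of the element size, so byte offsets
// 16 and 32 become 16/4 = 4 and 32/4 = 8 for the 4-byte b32 elements.)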
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly to
//   run before scheduling. It currently misses stores of constants because the
//   load of the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently matches one
//   pair at a time, recomputes the live intervals, and then moves on to the
//   next pair. It would be better to compute a list of all the merges that
//   need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit offset
//   fields, but are close enough together, we can add a constant to the base
//   pointer and use the resulting reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
private:
  const TargetMachine *TM;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  static bool offsetsCanBeCombined(unsigned Offset0,
                                   unsigned Offset1,
                                   unsigned EltSize);

  MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I,
                                                 unsigned EltSize);

  void updateRegDefsUses(unsigned SrcReg,
                         unsigned DstReg,
                         unsigned SubIdx);

  MachineBasicBlock::iterator mergeRead2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

  MachineBasicBlock::iterator mergeWrite2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

public:
  static char ID;

  SILoadStoreOptimizer() :
    MachineFunctionPass(ID),
    TM(nullptr),
    TII(nullptr),
    TRI(nullptr),
    MRI(nullptr),
    LIS(nullptr) {}

  SILoadStoreOptimizer(const TargetMachine &TM_) :
    MachineFunctionPass(ID),
    TM(&TM_),
    TII(static_cast<const SIInstrInfo*>(TM->getSubtargetImpl()->getInstrInfo())) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Load / Store Optimizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreserved<LiveVariables>();
    AU.addRequired<LiveIntervals>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
                                                unsigned Offset1,
                                                unsigned EltSize) {
  // XXX - Would the same offset be OK? Is there any reason this would happen
  // or be useful?
  if (Offset0 == Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((Offset0 % EltSize != 0) || (Offset1 % EltSize != 0))
    return false;

  unsigned EltOffset0 = Offset0 / EltSize;
  unsigned EltOffset1 = Offset1 / EltSize;

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
    return true;

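  // Worked example (assuming EltSize == 4): byte offsets 16 and 32 give
  // element offsets 4 and 8, which fit in 8 bits directly. Byte offsets 0 and
  // 8192 give element offsets 0 and 2048; 2048 does not fit, but both are
  // multiples of 64, so the st64 forms below can encode them as 0 and 32.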
  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64 != 0))
    return false;

  return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
                                         unsigned EltSize) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  ++MBBI;

  if (MBBI == E || MBBI->getOpcode() != I->getOpcode())
    return E;

  // Don't merge volatiles.
  if (MBBI->hasOrderedMemoryRef())
    return E;

  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
  const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

  // Check same base pointer. Be careful of subregisters, which can occur with
  // vectors of pointers.
  if (AddrReg0.getReg() == AddrReg1.getReg() &&
      AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                               AMDGPU::OpName::offset);
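    // DS instructions encode the offset as a 16-bit immediate, so only the
    // low 16 bits of the operand are meaningful here.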
    unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
    unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

    // Check both offsets fit in the reduced range.
    if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
      return MBBI;
  }

  return E;
}

void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg,
                                             unsigned DstReg,
                                             unsigned SubIdx) {
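  // Advance the iterator before rewriting each operand: substVirtReg moves
  // the operand onto DstReg's use list, invalidating the current position.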
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg),
       E = MRI->reg_end(); I != E; ) {
    MachineOperand &O = *I;
    ++I;
    O.substVirtReg(DstReg, SubIdx, *TRI);
  }
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);

  unsigned DestReg0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst)->getReg();
  unsigned DestReg1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst)->getReg();

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in
  // the non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

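  // The two destinations are merged into one wide register: a 64-bit pair for
  // b32 reads, or a 128-bit quad for b64 reads.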
  const TargetRegisterClass *SuperRC
    = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = I->getDebugLoc();
  MachineInstrBuilder Read2
    = BuildMI(*MBB, I, DL, Read2Desc, DestReg)
      .addImm(0) // gds
      .addOperand(*AddrReg) // addr
      .addImm(NewOffset0) // offset0
      .addImm(NewOffset1) // offset1
      .addMemOperand(*I->memoperands_begin())
      .addMemOperand(*Paired->memoperands_begin());

  LIS->InsertMachineInstrInMaps(Read2);

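  // Redirect all uses of the original destination registers to the matching
  // subregisters of the merged result.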
  unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
  updateRegDefsUses(DestReg0, DestReg, SubRegIdx0);
  updateRegDefsUses(DestReg1, DestReg, SubRegIdx1);

  LIS->RemoveMachineInstrFromMaps(I);
  LIS->RemoveMachineInstrFromMaps(Paired);
  I->eraseFromParent();
  Paired->eraseFromParent();

  LiveInterval &AddrRegLI = LIS->getInterval(AddrReg->getReg());
  LIS->shrinkToUses(&AddrRegLI);

  LIS->getInterval(DestReg); // Create new LI

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2.getInstr();
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in
  // the non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = I->getDebugLoc();

  MachineInstrBuilder Write2
    = BuildMI(*MBB, I, DL, Write2Desc)
      .addImm(0) // gds
      .addOperand(*Addr) // addr
      .addOperand(*Data0) // data0
      .addOperand(*Data1) // data1
      .addImm(NewOffset0) // offset0
      .addImm(NewOffset1) // offset1
      .addMemOperand(*I->memoperands_begin())
      .addMemOperand(*Paired->memoperands_begin());

  // XXX - How do we express subregisters here?
  unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };

  LIS->RemoveMachineInstrFromMaps(I);
  LIS->RemoveMachineInstrFromMaps(Paired);
  I->eraseFromParent();
  Paired->eraseFromParent();

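  // Recompute the live ranges of the source registers over the region now
  // occupied by the single write2 instruction.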
  LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2.getInstr();
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
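// A merged instruction replaces the first instruction of the pair, and the
// scan resumes from the merged instruction.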
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeRead2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeWrite2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  const TargetSubtargetInfo *STM = MF.getTarget().getSubtargetImpl();
  TRI = static_cast<const SIRegisterInfo*>(STM->getRegisterInfo());
  TII = static_cast<const SIInstrInfo*>(STM->getInstrInfo());
  MRI = &MF.getRegInfo();

  LIS = &getAnalysis<LiveIntervals>();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

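  // Rewriting defs and uses in place (updateRegDefsUses) assumes the function
  // is no longer in SSA form; hence the assert below.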
  assert(!MRI->isSSA());

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}