//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offsets, but are close enough together to fit in 8 bits, we can add to the
//   base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  AliasAnalysis *AA;

  static bool offsetsCanBeCombined(unsigned Offset0,
                                   unsigned Offset1,
                                   unsigned EltSize);

  MachineBasicBlock::iterator findMatchingDSInst(
    MachineBasicBlock::iterator I,
    unsigned EltSize,
    SmallVectorImpl<MachineInstr*> &InstsToMove);

  MachineBasicBlock::iterator mergeRead2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize,
    ArrayRef<MachineInstr*> InstsToMove);

  MachineBasicBlock::iterator mergeWrite2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize,
    ArrayRef<MachineInstr*> InstsToMove);

public:
  static char ID;

  SILoadStoreOptimizer()
    : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
      AA(nullptr) {}

  SILoadStoreOptimizer(const TargetMachine &TM_) : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Load / Store Optimizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}

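// Reinsert every instruction in InstsToMove, in order, immediately after I.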
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

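// Record MI's register defs so we can later detect intervening instructions
// that read them and therefore also need to be moved.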
static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

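// Return true if every memory-accessing instruction in InstsToMove is
// trivially disjoint from MemOp, so all of them can safely be moved past it.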
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {

  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!TII->areMemAccessesTriviallyDisjoint(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

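// Return true if the two byte offsets can be encoded in the offset0/offset1
// fields of a read2/write2-style instruction with element size Size, either
// directly or via the stride-64 (st64) variants.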
bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
                                                unsigned Offset1,
                                                unsigned Size) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (Offset0 == Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
    return false;

  unsigned EltOffset0 = Offset0 / Size;
  unsigned EltOffset1 = Offset1 / Size;

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
    return true;

  // If the offsets in elements don't fit in 8 bits, we might be able to use
  // the stride 64 versions.
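  // For example, with Size == 4, byte offsets 0 and 1024 give element offsets
  // 0 and 256; 256 doesn't fit in 8 bits, but both are multiples of 64, so the
  // st64 forms can encode them as 0 and 4.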
  if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64) != 0)
    return false;

  return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
}

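// Scan forward from I for another DS instruction with the same opcode and base
// address whose offset can be combined with I's. Instructions that must be
// moved below the merged pair are collected in InstsToMove. Returns the
// block's end iterator if no match is found.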
MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
                                  unsigned EltSize,
                                  SmallVectorImpl<MachineInstr*> &InstsToMove) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {

    if (MBBI->getOpcode() != I->getOpcode()) {

      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return E;

      if (MBBI->mayLoadOrStore() &&
          !TII->areMemAccessesTriviallyDisjoint(*I, *MBBI, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
      for (const MachineOperand *Def : DefsToMove) {
        bool ReadDef = MBBI->readsVirtualRegister(Def->getReg());
        // If ReadDef is true, then there is a use of Def between I
        // and the instruction that I will potentially be merged with. We
        // will need to move this instruction after the merged instructions.
        if (ReadDef) {
          InstsToMove.push_back(&*MBBI);
          addDefsToList(*MBBI, DefsToMove);
          break;
        }
      }
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return E;

    int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur with
    // vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
      unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(Offset0, Offset1, EltSize) &&
          canMoveInstsAcrossMemOp(*MBBI, InstsToMove, TII, AA))
        return MBBI;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // FIXME: This is too conservative.
    break;
  }
  return E;
}

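// Replace the DS read pair I/Paired with a single read2 (or read2st64)
// instruction that loads into a new wider register, then copy the two halves
// back to the original destination registers.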
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize,
  ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);

  const MachineOperand *Dest0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst);
  const MachineOperand *Dest1 = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in the
  // non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
  }

  unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = I->getDebugLoc();
  MachineInstrBuilder Read2
    = BuildMI(*MBB, Paired, DL, Read2Desc, DestReg)
        .addOperand(*AddrReg) // addr
        .addImm(NewOffset0) // offset0
        .addImm(NewOffset1) // offset1
        .addImm(0) // gds
        .addMemOperand(*I->memoperands_begin())
        .addMemOperand(*Paired->memoperands_begin());
  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, Paired, DL, CopyDesc)
    .addOperand(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired, DL, CopyDesc)
    .addOperand(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, InstsToMove);

  MachineBasicBlock::iterator Next = std::next(I);
  I->eraseFromParent();
  Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

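// Replace the DS write pair I/Paired with a single write2 (or write2st64)
// instruction that stores both data operands with the combined offsets.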
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize,
  ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in the
  // non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
  }

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = I->getDebugLoc();

  MachineInstrBuilder Write2
    = BuildMI(*MBB, Paired, DL, Write2Desc)
        .addOperand(*Addr) // addr
        .addOperand(*Data0) // data0
        .addOperand(*Data1) // data1
        .addImm(NewOffset0) // offset0
        .addImm(NewOffset1) // offset1
        .addImm(0) // gds
        .addMemOperand(*I->memoperands_begin())
        .addMemOperand(*Paired->memoperands_begin());

  moveInstsAfter(Write2, InstsToMove);

  MachineBasicBlock::iterator Next = std::next(I);
  I->eraseFromParent();
  Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    SmallVector<MachineInstr*, 8> InstsToMove;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size,
                                                             InstsToMove);
      if (Match != E) {
        Modified = true;
        I = mergeRead2Pair(I, Match, Size, InstsToMove);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size,
                                                             InstsToMove);
      if (Match != E) {
        Modified = true;
        I = mergeWrite2Pair(I, Match, Size, InstsToMove);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}