//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// It will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
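//
// When both element offsets are multiples of 64, the ST64 forms can be used
// instead (a sketch, assuming 4-byte b32 elements):
//  ds_read_b32 v0, v2 offset:0
//  ds_read_b32 v1, v2 offset:256
// ==>
//  ds_read2st64_b32 v[0:1], v2 offset0:0 offset1:1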
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently only matches
//   one pair, recomputes the live intervals, and moves on to the next pair.
//   It would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together that their differences fit,
//   we can add to the base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {}

  SILoadStoreOptimizer(const TargetMachine &TM_) : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Load / Store Optimizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}

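// A sketch of how a target pipeline might schedule this pass (the exact hook
// in AMDGPUTargetMachine.cpp is an assumption here, not confirmed by this
// file): since the pass relies on the scheduler to cluster the DS accesses,
// it would be inserted right after machine scheduling, e.g.
//
//   insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
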
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
          // RAW or WAR - cannot reorder
          // WAW - cannot reorder
          // RAR - safe to reorder
          !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
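//
// For example (a hypothetical sequence): if Defs already holds %w from the
// load being merged, and MI is "%x = V_ADD_F32 %w, %y", then MI is appended
// to Insts and %x is appended to Defs, so that any later readers of %x are
// moved below the merge point as well.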
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen
  // or be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64 == 0) &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift the base address to decrease the offsets.
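  // For example (hypothetical values, EltSize = 4): byte offsets 1024 and
  // 1028 give element offsets 256 and 257, which do not fit in 8 bits. With
  // BaseOff = 1024 folded into the base register by the v_add emitted in the
  // merge functions below, the rewritten offsets become 0 and 1, which do:
  //   v_add_i32_e32 v3, vcc, 1024, v2
  //   ds_read2_b32 v[0:1], v3 offset0:0 offset1:1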
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for (; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check for the same base pointer. Be careful of subregisters, which can
    // occur with vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all of I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags) // addr
          .addImm(NewOffset0)            // offset0
          .addImm(NewOffset1)            // offset1
          .addImm(0)                     // gds
          .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these operands. We want
  // to be sure we preserve the subregister index and any register flags set
  // on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags) // addr
          .add(*Data0)                   // data0
          .add(*Data1)                   // data1
          .addImm(NewOffset0)            // offset0
          .addImm(NewOffset1)            // offset1
          .addImm(0)                     // gds
          .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}