//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
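// (offset0/offset1 count in units of the element size, 4 bytes here, so the
// byte offsets 16 and 32 become 4 and 8.)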
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
// each other, and then only merges adjacent pairs of instructions. It would
// be good to be more flexible with interleaved instructions, and possibly run
// before scheduling. It currently misses stores of constants because loading
// the constant into the data register is placed between the stores, although
// this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
// one pair, then recomputes live intervals and moves on to the next pair. It
// would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
// cluster of loads has offsets that are too large to fit in the 8-bit
// offsets, but are close enough together that their differences fit, we can
// add to the base pointer and use the new, reduced offsets.
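// (offsetsCanBeCombined() below already does a limited, per-pair form of
// this via CI.BaseOff; the improvement would be doing it for a whole
// cluster at once.)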
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
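  // Bookkeeping for one candidate merge: the pair of instructions being
  // combined, their (possibly rewritten) offsets, and any instructions that
  // must be moved past the merge point to keep the block legal.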
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

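// Move all instructions in InstsToMove to immediately after I. Because each
// one is re-inserted before the same iterator, their relative order is
// preserved.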
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  // XXX: Should this be looking for implicit defs?
  for (const MachineOperand &Def : MI.defs())
    Defs.insert(Def.getReg());
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
         TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

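// Decide whether CI's two offsets can be encoded in the 8-bit offset0/offset1
// fields of a read2/write2, rewriting CI.Offset0/Offset1 (and possibly setting
// CI.UseST64 or CI.BaseOff) in place on success.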
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
216 isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
217 CI.Offset0 = EltOffset0 / 64;
218 CI.Offset1 = EltOffset1 / 64;
219 CI.UseST64 = true;
220 return true;
221 }
Matt Arsenaultfe0a2e62014-10-10 22:12:32 +0000222
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +0000223 // Check if the new offsets fit in the reduced 8-bit range.
224 if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
225 CI.Offset0 = EltOffset0;
226 CI.Offset1 = EltOffset1;
227 return true;
228 }
229
230 // Try to shift base address to decrease offsets.
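  // (CI.Offset0/Offset1 still hold the original byte offsets at this point,
  // so BaseOff below is in bytes while the Elt* values are in elements. E.g.
  // with EltSize == 4 and byte offsets 4096 and 4100, OffsetDiff == 1 and
  // BaseOff == 4096, and the rewritten offsets become 0 and 1.)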
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

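// Scan forward from CI.I for another DS instruction with the same opcode and
// base address that it can be paired with. On success, fill in CI.Paired and
// the offsets, and return true.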
bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                           AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);

  // We only ever merge operations with the same base address register, so don't
  // bother scanning forward if there are no other uses.
  if (TargetRegisterInfo::isPhysicalRegister(AddrReg0.getReg()) ||
      MRI->hasOneNonDBGUse(AddrReg0.getReg()))
    return false;

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction; any uses of I will need to
      // be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur with
    // vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check whether we can move I across MBBI and whether we can move all
    // of I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

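// Merge CI.I and CI.Paired into a single ds_read2/ds_read2st64, then copy the
// two halves of the new super-register back into the original destination
// registers so downstream uses are unaffected.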
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
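  // If a common base offset was factored out by offsetsCanBeCombined(),
  // materialize BaseOff + addr in a fresh VGPR so the reduced offsets are
  // encoded relative to it.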
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;
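  // Read2 is referenced only inside DEBUG(), which compiles away in release
  // builds; the void cast above keeps those builds warning-free.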

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

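// The write counterpart of mergeRead2Pair: fuse CI.I and CI.Paired into a
// single ds_write2/ds_write2st64 that stores both data operands.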
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these operands. We want to
  // be sure we preserve the subregister index and any register flags set on
  // them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}