//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
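// (The ds_read2 offsets are encoded in units of the element size, which is
//  why byte offsets 16 and 32 become offset0:4 offset1:8 above.)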
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but close enough together that their differences do fit,
//   we can add to the base pointer and use the new, reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

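  // Bookkeeping for one candidate merge: the pair of instructions, their
  // decoded offsets and cache-policy bits, and any intervening instructions
  // that must be moved below the merged access.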
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  // XXX: Should this be looking for implicit defs?
  for (const MachineOperand &Def : MI.defs())
    Defs.insert(Def.getReg());
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
         TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
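    // EltOffset0/1 count dword-sized units here. An x2 access covers two such
    // units, so a pair is mergeable only if the second access starts exactly
    // where the first one ends, with matching cache-policy bits.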
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
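  // For example, with b32 elements, byte offsets 16384 and 16640 give element
  // offsets 4096 and 4160: both are multiples of 64, and 4096/64 = 64 and
  // 4160/64 = 65 fit in 8 bits, so the pair can use a read2st64/write2st64
  // instruction with offset0:64 offset1:65.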
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
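  // For example, b32 element offsets 1024 and 1025 do not fit in 8 bits, but
  // after materializing min(Offset0, Offset1) into a new base register with a
  // v_add below, the remaining offsets become 0 and 1.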
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another instruction, we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags) // addr
          .addImm(NewOffset0)            // offset0
          .addImm(NewOffset1)            // offset1
          .addImm(0)                     // gds
          .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() and not .addReg() with these. We want to be sure we
  // preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags) // addr
          .add(*Data0)                   // data0
          .add(*Data1)                   // data1
          .addImm(NewOffset0)            // offset0
          .addImm(NewOffset1)            // offset1
          .addImm(0)                     // gds
          .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

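// Merge two buffer_load_dword (or dwordx2) instructions of the same
// offen/offset class into a single x2/x4 load, then copy each half of the
// wide result back into the original destination registers.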
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

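// Return the next wider buffer-store opcode (dword -> dwordx2,
// dwordx2 -> dwordx4) for I, or 0 if I is not a mergeable buffer store.
// IsX2 and IsOffen report which variant the input opcode is.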
unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
  const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

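// Merge two buffer stores by packing their data into one wide register with a
// REG_SEQUENCE and emitting a single x2/x4 store of that register. Used for
// both the offen and offset addressing variants.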
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent memory operations with constant offsets
// from the same base register. We rely on the scheduler to do the hard work
// of clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (STM->hasSBufferLoadStoreAtomicDwordxN() &&
        (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
         Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM)) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
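      // EltSize here is the encoding of a 4-byte offset: 1 on targets whose
      // SMRD offsets count dwords, 4 where they count bytes.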
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}