//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offsets, but are close enough together to fit in 8 bits, we can add to the
//   base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

77class SILoadStoreOptimizer : public MachineFunctionPass {
Marek Olsak6a0548a2017-11-09 01:52:30 +000078 enum InstClassEnum {
79 DS_READ_WRITE,
80 S_BUFFER_LOAD_IMM,
81 BUFFER_LOAD_OFFEN,
Marek Olsak4c421a2d2017-11-09 01:52:36 +000082 BUFFER_LOAD_OFFSET,
Marek Olsak58410f32017-11-09 01:52:55 +000083 BUFFER_STORE_OFFEN,
84 BUFFER_STORE_OFFSET,
Marek Olsak6a0548a2017-11-09 01:52:30 +000085 };
86
NAKAMURA Takumiaba2b3d2017-10-10 08:30:53 +000087 struct CombineInfo {
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +000088 MachineBasicBlock::iterator I;
89 MachineBasicBlock::iterator Paired;
90 unsigned EltSize;
91 unsigned Offset0;
92 unsigned Offset1;
93 unsigned BaseOff;
Marek Olsak6a0548a2017-11-09 01:52:30 +000094 InstClassEnum InstClass;
Marek Olsakb953cc32017-11-09 01:52:23 +000095 bool GLC0;
96 bool GLC1;
Marek Olsak6a0548a2017-11-09 01:52:30 +000097 bool SLC0;
98 bool SLC1;
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +000099 bool UseST64;
Marek Olsakb953cc32017-11-09 01:52:23 +0000100 bool IsX2;
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +0000101 SmallVector<MachineInstr*, 8> InstsToMove;
Eugene Zelenko59e12822017-08-08 00:47:13 +0000102 };
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +0000103
private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  // XXX: Should this be looking for implicit defs?
  for (const MachineOperand &Def : MI.defs())
    Defs.insert(Def.getReg());
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
         TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
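// For example (with hypothetical virtual registers): if the candidate I
// defines %v0 and a later "%v1 = V_ADD_F32 %v0, %v2" reads it, that add is
// appended to Insts so it moves below the eventual merge point along with I.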
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
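  // A worked example (assuming a subtarget whose S_BUFFER_LOAD_IMM offset
  // immediate is encoded in bytes, giving EltSize == 4): encoded offsets 4
  // and 8 become element offsets 1 and 2, which differ by exactly Diff == 1
  // dword, so the pair can merge into an x2 load as in the header example.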
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
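  // For example, with EltSize == 4, byte offsets 0 and 8192 give element
  // offsets 0 and 2048; 2048 doesn't fit in 8 bits, but both are multiples
  // of 64, so the ST64 forms can encode them as 0 and 32.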
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
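  // For example, with EltSize == 4, byte offsets 4096 and 4100 give element
  // offsets 1024 and 1025; subtracting BaseOff == 4096 (re-added to the base
  // register with a V_ADD when the pair is merged) leaves 0 and 1.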
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another memory instruction, we will be moving I
      // down to the location of the matched instruction. Any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_READ2ST64_B32_gfx9 : AMDGPU::DS_READ2ST64_B64_gfx9;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9 : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_WRITE2ST64_B32_gfx9 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure we
  // preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
  const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent memory operations with constant offsets
// from the same base register. We rely on the scheduler to do the hard work
// of clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64 ||
        Opc == AMDGPU::DS_READ_B32_gfx9 || Opc == AMDGPU::DS_READ_B64_gfx9) {

      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64 ||
               Opc == AMDGPU::DS_WRITE_B32_gfx9 ||
               Opc == AMDGPU::DS_WRITE_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize
        = (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (STM->hasSBufferLoadStoreAtomicDwordxN() &&
        (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
         Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM)) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}