//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
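// Similarly, when both element offsets are multiples of 64, the stride-64
// forms can be used (an illustrative case, with a 4-byte element size):
//  ds_read_b32 v0, v2 offset:0
//  ds_read_b32 v1, v2 offset:256
// ==>
//  ds_read2st64_b32 v[0:1], v2 offset0:0 offset1:1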
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offsets, but are close enough together that their differences fit in
//   8 bits, we can add to the base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

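// Move all instructions in InstsToMove to a point just after I, preserving
// their relative order.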
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
          // RAW or WAR - cannot reorder
          // WAW - cannot reorder
          // RAR - safe to reorder
          !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen
  // or be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8-bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
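  // For example, with EltSize == 4 (an illustrative case): byte offsets
  // 0x1000 and 0x1008 give element offsets 1024 and 1026, which don't fit in
  // 8 bits, but rebasing at BaseOff = 0x1000 reduces them to 0 and 2, which do.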
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur
    // with vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI, and if we can move all of I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
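  // If offsetsCanBeCombined() rebased the offsets, CI.BaseOff holds the
  // common byte offset; materialize it into a new base register so the
  // reduced offset0/offset1 values are applied relative to it.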
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                          .add(*Dest1)
                          .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
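  // As in mergeRead2Pair, rebase the address when a common base offset was
  // chosen, so the reduced offsets fit the 8-bit offset fields.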
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}