//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote a constant offset to the immediate by
// adjusting the base. It tries to use a base from the nearby instructions that
// allows it to have a 13-bit constant offset and then promotes the 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offsets, but are close enough to fit in the 8 bits, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
};

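// Bitmask of the address operand kinds an instruction carries; computed by
// getRegs() below. A candidate pair must match on every operand kind present.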
enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool DLC0;
    bool DLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
    int AddrIdx[5];
    const MachineOperand *AddrReg[5];
    unsigned NumAddresses;

    bool hasSameBaseAddress(const MachineInstr &MI) {
      for (unsigned i = 0; i < NumAddresses; i++) {
        const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);

        if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
          if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
              AddrReg[i]->getImm() != AddrRegNext.getImm()) {
            return false;
          }
          continue;
        }

        // Check same base pointer. Be careful of subregisters, which can occur
        // with vectors of pointers.
        if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
            AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
          return false;
        }
      }
      return true;
    }

    void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
               const GCNSubtarget &STM);
    void setPaired(MachineBasicBlock::iterator MI, const SIInstrInfo &TII);
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset) const;
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr) const;
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
  Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
  void processBaseWithConstOffset(const MachineOperand &Base,
                                  MemAddress &Addr) const;
  /// Promotes a constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &CI, MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &Promoted) const;

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

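// Returns the number of dwords the instruction reads or writes, or 0 for
// opcodes this pass cannot merge.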
static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
  const unsigned Opc = MI.getOpcode();

  if (TII.isMUBUF(Opc)) {
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFElements(Opc);
  }

  switch (Opc) {
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  default:
    return 0;
  }
}

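// Maps an opcode to the InstClassEnum bucket used for matching. MUBUF opcodes
// are classified via their base opcode, keeping the _exact variants distinct.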
static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
  if (TII.isMUBUF(Opc)) {
    const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);

    // If we couldn't identify the opcode, bail out.
    if (baseOpcode == -1) {
      return UNKNOWN;
    }

    switch (baseOpcode) {
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      return BUFFER_LOAD_OFFEN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      return BUFFER_LOAD_OFFSET;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      return BUFFER_STORE_OFFEN;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      return BUFFER_STORE_OFFSET;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      return BUFFER_LOAD_OFFEN_exact;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
      return BUFFER_LOAD_OFFSET_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      return BUFFER_STORE_OFFEN_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
      return BUFFER_STORE_OFFSET_exact;
    default:
      return UNKNOWN;
    }
  }

  switch (Opc) {
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  default:
    return UNKNOWN;
  }
}

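// Returns the RegisterEnum bitmask of address operands present on Opc; e.g.
// an OFFEN MUBUF access yields VADDR | SRSRC | SOFFSET.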
static unsigned getRegs(unsigned Opc, const SIInstrInfo &TII) {
  if (TII.isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}

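// Initializes the CombineInfo from the first instruction of a candidate pair:
// classifies it, records its offset, width and cache-policy bits, and caches
// the indices of its address operands for later base-address comparison.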
void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
                                              const SIInstrInfo &TII,
                                              const GCNSubtarget &STM) {
  I = MI;
  unsigned Opc = MI->getOpcode();
  InstClass = getInstClass(Opc, TII);

  if (InstClass == UNKNOWN)
    return;

  switch (InstClass) {
  case DS_READ:
    EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                        : 4;
    break;
  case DS_WRITE:
    EltSize =
        (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                          : 4;
    break;
  case S_BUFFER_LOAD_IMM:
    EltSize = AMDGPU::getSMRDEncodedOffset(STM, 4);
    break;
  default:
    EltSize = 4;
    break;
  }

  int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
  Offset0 = I->getOperand(OffsetIdx).getImm();
  Width0 = getOpcodeWidth(*I, TII);

  if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
    Offset0 &= 0xffff;
  } else {
    GLC0 = TII.getNamedOperand(*I, AMDGPU::OpName::glc)->getImm();
    if (InstClass != S_BUFFER_LOAD_IMM) {
      SLC0 = TII.getNamedOperand(*I, AMDGPU::OpName::slc)->getImm();
    }
    DLC0 = TII.getNamedOperand(*I, AMDGPU::OpName::dlc)->getImm();
  }

  unsigned AddrOpName[5] = {0};
  NumAddresses = 0;
  const unsigned Regs = getRegs(I->getOpcode(), TII);

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &I->getOperand(AddrIdx[i]);
  }
}

void SILoadStoreOptimizer::CombineInfo::setPaired(
    MachineBasicBlock::iterator MI, const SIInstrInfo &TII) {
  Paired = MI;
  assert(InstClass == getInstClass(Paired->getOpcode(), TII));
  int OffsetIdx =
      AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::offset);
  Offset1 = Paired->getOperand(OffsetIdx).getImm();
  Width1 = getOpcodeWidth(*Paired, TII);
  if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
    Offset1 &= 0xffff;
  } else {
    GLC1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::glc)->getImm();
    if (InstClass != S_BUFFER_LOAD_IMM) {
      SLC1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::slc)->getImm();
    }
    DLC1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::dlc)->getImm();
  }
}

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

// This function assumes that \p A and \p B are identical except for size and
// offset, and that they reference adjacent memory.
static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
                                                   const MachineMemOperand *A,
                                                   const MachineMemOperand *B) {
  unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
  unsigned Size = A->getSize() + B->getSize();
  // This function adds the offset parameter to the existing offset for A,
  // so we pass 0 here as the offset and then manually set it to the correct
  // value after the call.
  MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
  MMO->setOffset(MinOffset);
  return MMO;
}

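// Decides whether the two offsets in CI can be encoded by a single merged
// instruction, possibly by switching to the ST64 forms or by rebasing via
// CI.BaseOff. As an illustrative sketch (values assumed, not from any test):
// two ds_read_b32 at byte offsets 0 and 25600 have element offsets 0 and
// 6400; those only fit the 8-bit fields as stride-64 offsets 0 and 100, so
// UseST64 is set.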
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

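// Checks that the combined width is encodable: merged S_BUFFER loads must be
// 2 or 4 dwords wide, while other classes allow up to 4 dwords (3 only on
// targets with dwordx3 load/store support).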
bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

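// Scans forward from CI.I for a mergeable instruction with the same base
// address, collecting any intervening instructions that must be moved below
// the merge point. Returns true and records the partner via CI.setPaired()
// on success.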
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc, *TII);

  if (InstClass == UNKNOWN) {
    return false;
  }

  // Do not merge VMEM buffer instructions with "swizzled" bit set.
  int Swizzled =
      AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
  if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
    return false;

  for (unsigned i = 0; i < CI.NumAddresses; i++) {
    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (CI.AddrReg[i]->isReg() &&
        (Register::isPhysicalRegister(CI.AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(CI.AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {
    const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);

    if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) ||
        (IsDS && (MBBI->getOpcode() != Opc))) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = CI.hasSameBaseAddress(*MBBI);

    if (Match) {
      CI.setPaired(MBBI, *TII);

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

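// Merges the two DS reads in CI into a single ds_read2 (or ds_read2st64) that
// writes a fresh wide register, then copies each half back to the original
// destinations; this is the ds_read2_b32 transformation shown in the file
// header. If CI.BaseOff is set, the shared base pointer is first rebased
// with an add.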
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  Register DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

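// Merges two s_buffer_load_dword(xN) from the same sbase into one wider load
// at the smaller offset (the s_buffer_load_dwordx2 example in the file
// header), then copies each half back to the original destination registers.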
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.DLC0)      // dlc
      .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Create the new, wider destination register.
  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode, *TII);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .addImm(CI.DLC0)      // dlc
      .addImm(0)            // swz
      .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;

  switch (CI.InstClass) {
  default:
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  }
}

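// Returns the subregister indices that the two original values occupy inside
// the merged register, ordered so that the instruction with the smaller
// offset takes the low subregisters. For example, with Width0 = 1, Width1 = 2
// and Offset0 < Offset1, the result is (sub0, sub1_sub2).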
std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
  if (CI.Offset0 > CI.Offset1) {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
      case 3:
        return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
      }
    }
  } else {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
      case 2:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
      case 3:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
      case 2:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
      }
    }
  }
}

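// Picks the register class for the merged value: an SGPR class sized by the
// combined dword count for S_BUFFER loads, a VGPR class otherwise.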
const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  Register SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode, *TII);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .addImm(CI.DLC0)                          // dlc
      .addImm(0)                                // swz
      .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}
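// Wrap Val for use as a machine operand: legal inline constants are used
// directly as immediates, anything else is first materialized into an SGPR
// with S_MOV_B32.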
MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute base address using Addr and return the final register.
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo =
      createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);

  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  Register CarryReg = MRI->createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);

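  // Materialize the 64-bit add of Addr.Base and Addr.Offset as two 32-bit
  // adds, feeding the carry-out of the low half into the high half.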
  Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo)
          .addImm(0); // clamp bit
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););

  Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}

// Update MI's base register and immediate offset to NewBase and NewOffset.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) const {
  TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}
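// Return Op's value if it is an immediate, or the immediate moved into it by
// a unique S_MOV_B32 def; otherwise return None.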
Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

// Analyze Base and extract:
// - the 32-bit base registers and subregisters
// - the 64-bit constant offset
// Expecting the base computation to look like:
//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
//   %LO:vgpr_32, %c:sreg_64_xexec =
//       V_ADD_I32_e64 %BASE_LO:vgpr_32, %OFFSET0:sgpr_32
//   %HI:vgpr_32 = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//   %Base:vreg_64 =
//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) const {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
      || Def->getNumOperands() != 5)
    return;

  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

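  // The constant may sit in either source of the low-half add; whichever
  // source is not the constant is the low base register.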
  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}

bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) const {

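  // Only plain loads or plain stores are handled; atomics are rejected here
  // because they set both mayLoad and mayStore.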
  if (!(MI.mayLoad() ^ MI.mayStore()))
    return false;

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
    return false;

  if (MI.mayLoad() &&
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
    return false;
  }

  // Step 1: find the base registers and the 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
                    << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset
                    << "\n\n";);

  // Step 2: traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) whose offset has the highest
  // 13-bit distance from MI's offset.
  // E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;  load1 = load(addr1, 0)
  //   addr2 = &a + 6144;  load2 = load(addr2, 0)
  //   addr3 = &a + 8192;  load3 = load(addr3, 0)
  //   addr4 = &a + 10240; load4 = load(addr4, 0)
  //   addr5 = &a + 12288; load5 = load(addr5, 0)
  //
  // Starting from the first load, the optimization tries to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and
  // &a + 8192 have a 13-bit distance from &a + 4096. The heuristic picks
  // &a + 8192, the candidate at the maximum distance, as the new base
  // (anchor), since it can presumably accommodate more intermediate bases.
  //
  // Step 3: move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1 through load4.
  //   addr = &a + 8192
  //   load1 = load(addr, -4096)
  //   load2 = load(addr, -2048)
  //   load3 = load(addr, 0)
  //   load4 = load(addr, 2048)
  //   addr5 = &a + 12288; load5 = load(addr5, 0)
  //
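  // Best anchor candidate found so far, plus every instruction that shares
  // MI's base registers together with its constant offset.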
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
      static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
        *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
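    // Adopt this instruction as the anchor if rebasing MI on it is a legal
    // global addressing mode and it lies farther away than any candidate
    // seen so far.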
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
                      << AnchorAddr.Offset << "\n\n");

    // Instead of moving the anchor instruction up, just recompute its base
    // address right before MI.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););
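    // Re-point every other instruction with the same base whenever its new
    // offset relative to the anchor is also legal.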
    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << "    After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Contains the decomposed base address (base registers plus constant
  // offset) of each memory instruction analyzed so far.
  MemInfoMap Visited;
  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.setMI(I, *TII, *STM);

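    // Try to pair MI with a matching later instruction of the same class.
    // If the merged access is still narrower than the widest encoding,
    // OptimizeAgain requests another pass over the block.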
    switch (CI.InstClass) {
    default:
      break;
    case DS_READ:
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case DS_WRITE:
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case S_BUFFER_LOAD_IMM:
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
      } else {
        ++I;
      }
      continue;
    case BUFFER_LOAD_OFFEN:
    case BUFFER_LOAD_OFFSET:
    case BUFFER_LOAD_OFFEN_exact:
    case BUFFER_LOAD_OFFSET_exact:
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    case BUFFER_STORE_OFFEN:
    case BUFFER_STORE_OFFSET:
    case BUFFER_STORE_OFFEN_exact:
    case BUFFER_STORE_OFFSET_exact:
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
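    // Iterate to a fixed point: an access produced by one merge may itself
    // be mergeable with another on the next pass.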
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MBB);
    } while (OptimizeAgain);
  }

  return Modified;
}