//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pair instructions generated from unscaled "
          "load/stores");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we
// form pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a wider instruction.
  MachineBasicBlock::iterator
  mergeNarrowInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator MergeMI,
                   const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Check if converting two narrow loads into a single wider load with
  // bitfield extracts could be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

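// Returns the opcode of the bitfield-extract instruction used to recover a
// narrow value from a merged wide load: UBFMWri for the zero-extending
// (unsigned) narrow loads and SBFMWri for the sign-extending ones.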
static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

static bool isNarrowLoadOrStore(unsigned Opc) {
  return isNarrowLoad(Opc) || isNarrowStore(Opc);
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

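// Check whether the store opcode can supply every byte the load reads: the
// store must be at least as wide as the load and both must use the same kind
// of immediate offset (scaled vs. unscaled).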
static unsigned isMatchingStore(MachineInstr *LoadInst,
                                MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
503 llvm_unreachable("Opcode has no post-indexed wise equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
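  // The load is contained in the store if it starts at or after the store's
  // first byte and its last byte does not extend past the store's last byte.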
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

static bool isPromotableZeroStoreOpcode(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  return isNarrowStore(Opc) || Opc == AArch64::STRWui || Opc == AArch64::STURWi;
}

static bool isPromotableZeroStoreInst(MachineInstr *MI) {
  return (isPromotableZeroStoreOpcode(MI)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator MergeMI,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    ++NextI;

  unsigned Opc = I->getOpcode();
  bool IsScaled = !TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsScaled ? 1 : getMemScale(I);

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(MergeMI) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(MergeMI).getImm() + OffsetStride) {
    RtMI = MergeMI;
    Rt2MI = I;
  } else {
    RtMI = I;
    Rt2MI = MergeMI;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
  // Change the scaled offset from small to large type.
  if (IsScaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }

  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  if (isNarrowLoad(Opc)) {
    MachineInstr *RtNewDest = MergeForward ? I : MergeMI;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addOperand(getLdStRegOp(RtNewDest))
            .addOperand(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*MergeMI));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(MergeMI->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

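    // The wide load now holds both narrow values. Compute the bit positions
    // used to extract them again: the low value occupies bits [Width-1:0] of
    // the wide register and the high value bits [2*Width-1:Width], e.g. two
    // merged byte loads are recovered with extracts at LSB 0 and LSB 8.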
    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
    MachineInstr *ExtDestMI = MergeForward ? MergeMI : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 =
          BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(Rt2MI)))
              .addOperand(getLdStRegOp(Rt2MI))
              .addReg(getLdStRegOp(RtNewDest).getReg())
              .addImm(LSBHigh)
              .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 =
            BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(RtMI)))
                .addOperand(getLdStRegOp(RtMI))
                .addReg(getLdStRegOp(RtNewDest).getReg())
                .addImm(LSBLow)
                .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 =
            BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(RtMI)))
                .addOperand(getLdStRegOp(RtMI))
                .addReg(getLdStRegOp(RtNewDest).getReg())
                .addImm(LSBLow)
                .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 =
          BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(Rt2MI)))
              .addOperand(getLdStRegOp(Rt2MI))
              .addReg(getLdStRegOp(RtNewDest).getReg())
              .addImm(LSBHigh)
              .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    MergeMI->eraseFromParent();
    return NextI;
  }
  assert(isPromotableZeroStoreInst(I) && "Expected promotable zero store");

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .addOperand(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*MergeMI));

  (void)MIB;

  DEBUG(dbgs() << "Creating wider load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(MergeMI->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  int Offset = getLdStOffsetOp(I).getImm();
  int PairedOffset = getLdStOffsetOp(Paired).getImm();
  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled. If
    // I is scaled then scale the offset of Paired accordingly. Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = getMemScale(Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % getMemScale(Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }
  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
    assert(!(OffsetImm % getMemScale(RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= getMemScale(RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
            .addOperand(getLdStRegOp(RtMI))
            .addOperand(getLdStRegOp(Rt2MI))
            .addOperand(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*Paired));

  (void)MIB;

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  }
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(LoadI);
  int StoreSize = getMemScale(StoreI);
  unsigned LdRt = getLdStRegOp(LoadI).getReg();
  unsigned StRt = getLdStRegOp(StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load, if the destination register of the loads is the same
    // register for stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are in the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->isUnscaledLdSt(LoadI);
    assert(IsUnscaled == TII->isUnscaledLdSt(StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
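    // If the load reads from the very start of the stored value, an AND
    // keeping the low Width bits is enough; the immediate below is the packed
    // N:immr:imms logical-immediate encoding expected by ANDWri/ANDXri.
    // Otherwise use UBFM to shift the wanted bytes down while masking.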
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0);              // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
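  // Paired instructions encode a 7-bit signed element offset, so the final
  // scaled offset must fit in [-64, 63].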
Chad Rosier3dd0e942015-08-18 16:20:03 +00001050 return Offset <= 63 && Offset >= -64;
Tim Northover3b0846e2014-05-24 12:50:23 +00001051}
1052
1053// Do alignment, specialized to power of 2 and for signed ints,
1054// avoiding having to do a C-style cast from uint_64t to int when
Rui Ueyamada00f2f2016-01-14 21:06:47 +00001055// using alignTo from include/llvm/Support/MathExtras.h.
Tim Northover3b0846e2014-05-24 12:50:23 +00001056// FIXME: Move this function to include/MathExtras.h?
1057static int alignTo(int Num, int PowOf2) {
1058 return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
1059}
1060
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001061static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
1062 const AArch64InstrInfo *TII) {
1063 // One of the instructions must modify memory.
1064 if (!MIa->mayStore() && !MIb->mayStore())
1065 return false;
1066
1067 // Both instructions must be memory operations.
1068 if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
1069 return false;
1070
1071 return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
1072}
1073
1074static bool mayAlias(MachineInstr *MIa,
1075 SmallVectorImpl<MachineInstr *> &MemInsns,
1076 const AArch64InstrInfo *TII) {
1077 for (auto &MIb : MemInsns)
1078 if (mayAlias(MIa, MIb, TII))
1079 return true;
1080
1081 return false;
1082}
1083
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001084bool AArch64LoadStoreOpt::findMatchingStore(
1085 MachineBasicBlock::iterator I, unsigned Limit,
1086 MachineBasicBlock::iterator &StoreI) {
Jun Bum Lim633b2d82016-02-11 16:18:24 +00001087 MachineBasicBlock::iterator B = I->getParent()->begin();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001088 MachineBasicBlock::iterator MBBI = I;
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001089 MachineInstr *LoadMI = I;
1090 unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001091
Jun Bum Lim633b2d82016-02-11 16:18:24 +00001092 // If the load is the first instruction in the block, there's obviously
1093 // not any matching store.
1094 if (MBBI == B)
1095 return false;
1096
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001097 // Track which registers have been modified and used between the first insn
1098 // and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001099 ModifiedRegs.reset();
1100 UsedRegs.reset();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001101
Jun Bum Lim633b2d82016-02-11 16:18:24 +00001102 unsigned Count = 0;
1103 do {
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001104 --MBBI;
1105 MachineInstr *MI = MBBI;
Jun Bum Lim633b2d82016-02-11 16:18:24 +00001106
1107 // Don't count DBG_VALUE instructions towards the search limit.
1108 if (!MI->isDebugValue())
1109 ++Count;
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001110
1111 // If the load instruction reads directly from the address to which the
1112 // store instruction writes and the stored value is not modified, we can
1113 // promote the load. Since we do not handle stores with pre-/post-index,
1114 // it's unnecessary to check if BaseReg is modified by the store itself.
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001115 if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001116 BaseReg == getLdStBaseOp(MI).getReg() &&
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001117 isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001118 !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
1119 StoreI = MBBI;
1120 return true;
1121 }
1122
1123 if (MI->isCall())
1124 return false;
1125
1126 // Update modified / uses register lists.
1127 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1128
1129 // Otherwise, if the base register is modified, we have no match, so
1130 // return early.
1131 if (ModifiedRegs[BaseReg])
1132 return false;
1133
1134 // If we encounter a store aliased with the load, return early.
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001135 if (MI->mayStore() && mayAlias(LoadMI, MI, TII))
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001136 return false;
Jun Bum Lim633b2d82016-02-11 16:18:24 +00001137 } while (MBBI != B && Count < Limit);
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001138 return false;
1139}
1140
Chad Rosierc3f6cb92016-02-10 19:45:48 +00001141// Returns true if these two opcodes can be merged or paired. Otherwise,
1142// returns false.
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001143static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags,
1144 const AArch64InstrInfo *TII) {
Chad Rosierc3f6cb92016-02-10 19:45:48 +00001145 // Opcodes match: nothing more to check.
1146 if (OpcA == OpcB)
1147 return true;
1148
1149 // Try to match a sign-extended load/store with a zero-extended load/store.
1150 bool IsValidLdStrOpc, PairIsValidLdStrOpc;
1151 unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
1152 assert(IsValidLdStrOpc &&
1153 "Given Opc should be a Load or Store with an immediate");
1154 // OpcA will be the first instruction in the pair.
1155 if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
1156 Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
1157 return true;
1158 }
Chad Rosier00f9d232016-02-11 14:25:08 +00001159
1160 // If the second instruction isn't even a load/store, bail out.
1161 if (!PairIsValidLdStrOpc)
1162 return false;
1163
1164 // FIXME: We don't support merging narrow loads/stores with mixed
1165 // scaled/unscaled offsets.
1166 if (isNarrowLoadOrStore(OpcA) || isNarrowLoadOrStore(OpcB))
1167 return false;
1168
1169 // Try to match an unscaled load/store with a scaled load/store.
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001170 return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
Chad Rosier00f9d232016-02-11 14:25:08 +00001171 getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
1172
1173 // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
Chad Rosierc3f6cb92016-02-10 19:45:48 +00001174}
1175
Chad Rosier9f4ec2e2016-02-10 18:49:28 +00001176/// Scan the instructions looking for a load/store that can be combined with the
1177/// current instruction into a wider equivalent or a load/store pair.
Tim Northover3b0846e2014-05-24 12:50:23 +00001178MachineBasicBlock::iterator
1179AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001180 LdStPairFlags &Flags, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001181 MachineBasicBlock::iterator E = I->getParent()->end();
1182 MachineBasicBlock::iterator MBBI = I;
1183 MachineInstr *FirstMI = I;
1184 ++MBBI;
1185
Matthias Braunfa3872e2015-05-18 20:27:55 +00001186 unsigned Opc = FirstMI->getOpcode();
Tilmann Scheller4aad3bd2014-06-04 12:36:28 +00001187 bool MayLoad = FirstMI->mayLoad();
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001188 bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
Chad Rosierf77e9092015-08-06 15:50:12 +00001189 unsigned Reg = getLdStRegOp(FirstMI).getReg();
1190 unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
1191 int Offset = getLdStOffsetOp(FirstMI).getImm();
Chad Rosierf11d0402015-10-01 18:17:12 +00001192 int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001193 bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001194
1195 // Track which registers have been modified and used between the first insn
1196 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001197 ModifiedRegs.reset();
1198 UsedRegs.reset();
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001199
1200 // Remember any instructions that read/write memory between FirstMI and MI.
1201 SmallVector<MachineInstr *, 4> MemInsns;
1202
Tim Northover3b0846e2014-05-24 12:50:23 +00001203 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
1204 MachineInstr *MI = MBBI;
1205 // Skip DBG_VALUE instructions. Otherwise debug info can affect the
1206 // optimization by changing how far we scan.
1207 if (MI->isDebugValue())
1208 continue;
1209
1210 // Now that we know this is a real instruction, count it.
1211 ++Count;
1212
Chad Rosier18896c02016-02-04 16:01:40 +00001213 Flags.setSExtIdx(-1);
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001214 if (canMergeOpc(Opc, MI->getOpcode(), Flags, TII) &&
Chad Rosierc3f6cb92016-02-10 19:45:48 +00001215 getLdStOffsetOp(MI).isImm()) {
Chad Rosierc56a9132015-08-10 18:42:45 +00001216 assert(MI->mayLoadOrStore() && "Expected memory operation.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001217 // If we've found another instruction with the same opcode, check to see
1218 // if the base and offset are compatible with our starting instruction.
1219 // These instructions all have scaled immediate operands, so we just
1220 // check for +1/-1. Make sure to check the new instruction offset is
1221 // actually an immediate and not a symbolic reference destined for
1222 // a relocation.
1223 //
1224 // Pairwise instructions have a 7-bit signed offset field. Single insns
1225 // have a 12-bit unsigned offset field. To be a valid combine, the
1226 // final offset must be in range.
Chad Rosierf77e9092015-08-06 15:50:12 +00001227 unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
1228 int MIOffset = getLdStOffsetOp(MI).getImm();
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001229 bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
Chad Rosier00f9d232016-02-11 14:25:08 +00001230 if (IsUnscaled != MIIsUnscaled) {
1231 // We're trying to pair instructions that differ in how they are scaled.
1232 // If FirstMI is scaled then scale the offset of MI accordingly.
1233 // Otherwise, do the opposite (i.e., make MI's offset unscaled).
1234 int MemSize = getMemScale(MI);
1235 if (MIIsUnscaled) {
1236 // If the unscaled offset isn't a multiple of the MemSize, we can't
1237 // pair the operations together: bail and keep looking.
1238 if (MIOffset % MemSize)
1239 continue;
1240 MIOffset /= MemSize;
1241 } else {
1242 MIOffset *= MemSize;
1243 }
1244 }
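// For example (a sketch): if FirstMI is a scaled "ldr x0, [x2, #8]"
// (Offset == 1 in 8-byte units) and MI is an unscaled "ldur x1, [x2, #16]"
// (MIOffset == 16 bytes), MIOffset is divided by the 8-byte access size to
// give 2, so both offsets are compared in the same units below.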
1245
Tim Northover3b0846e2014-05-24 12:50:23 +00001246 if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
1247 (Offset + OffsetStride == MIOffset))) {
1248 int MinOffset = Offset < MIOffset ? Offset : MIOffset;
1249 // If this is a volatile load/store that otherwise matched, stop looking
1250 // as something is going on that we don't have enough information to
1251 // safely transform. Similarly, stop if we see a hint to avoid pairs.
1252 if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
1253 return E;
1254 // If the resultant immediate offset of merging these instructions
1255 // is out of range for a pairwise instruction, bail and keep looking.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001256 bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
1257 if (!IsNarrowLoad &&
Chad Rosier00f9d232016-02-11 14:25:08 +00001258 !inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001259 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
Chad Rosierc56a9132015-08-10 18:42:45 +00001260 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001261 continue;
1262 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001263
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001264 if (IsNarrowLoad || IsPromotableZeroStore) {
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001265 // If the alignment requirements of the scaled wide load/store
1266 // instruction can't express the offset of the scaled narrow
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001267 // input, bail and keep looking.
1268 if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
1269 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1270 MemInsns.push_back(MI);
1271 continue;
1272 }
1273 } else {
1274 // If the alignment requirements of the paired (scaled) instruction
1275 // can't express the offset of the unscaled input, bail and keep
1276 // looking.
1277 if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
1278 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1279 MemInsns.push_back(MI);
1280 continue;
1281 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001282 }
1283 // If the destination register of the loads is the same register, bail
1284 // and keep looking. A load-pair instruction with both destination
1285 // registers the same is UNPREDICTABLE and will result in an exception.
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001286 // For narrow stores, allow only when the stored value is the same
1287 // (i.e., WZR).
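// For example (hypothetical sequences; registers are arbitrary):
//   ldr x0, [x2]
//   ldr x0, [x2, #8]
// must not become "ldp x0, x0, [x2]", and a zero store "strh wzr, [x0]" is
// not merged with a following "strh w1, [x0, #2]" because both stored
// values must be WZR for the widened zero store.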
1288 if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001289 (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001290 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
Chad Rosierc56a9132015-08-10 18:42:45 +00001291 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001292 continue;
1293 }
1294
1295 // If the Rt of the second instruction was not modified or used between
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001296 // the two instructions and none of the instructions between the second
1297 // and first alias with the second, we can combine the second into the
1298 // first.
Chad Rosierf77e9092015-08-06 15:50:12 +00001299 if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
1300 !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001301 !mayAlias(MI, MemInsns, TII)) {
Chad Rosier96a18a92015-07-21 17:42:04 +00001302 Flags.setMergeForward(false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001303 return MBBI;
1304 }
1305
1306 // Likewise, if the Rt of the first instruction is not modified or used
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001307 // between the two instructions and none of the instructions between the
1308 // first and the second alias with the first, we can combine the first
1309 // into the second.
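// For example (a sketch; registers and the constant are arbitrary):
//   str w0, [x2]
//   mov w1, #5          ; redefines the Rt of the second store
//   str w1, [x2, #4]
// The second store cannot be hoisted up to the first (w1 would hold a stale
// value there), but the first store can be sunk down and paired at the
// second, giving "mov w1, #5" followed by "stp w0, w1, [x2]".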
Chad Rosierf77e9092015-08-06 15:50:12 +00001310 if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
Chad Rosier5f668e12015-09-03 14:19:43 +00001311 !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001312 !mayAlias(FirstMI, MemInsns, TII)) {
Chad Rosier96a18a92015-07-21 17:42:04 +00001313 Flags.setMergeForward(true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001314 return MBBI;
1315 }
1316 // Unable to combine these instructions due to interference in between.
1317 // Keep looking.
1318 }
1319 }
1320
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001321 // If the instruction wasn't a matching load or store, stop searching if we
1322 // encounter a call instruction that might modify memory.
1323 if (MI->isCall())
Tim Northover3b0846e2014-05-24 12:50:23 +00001324 return E;
1325
1326 // Update modified / uses register lists.
1327 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1328
1329 // Otherwise, if the base register is modified, we have no match, so
1330 // return early.
1331 if (ModifiedRegs[BaseReg])
1332 return E;
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001333
1334 // Update list of instructions that read/write memory.
1335 if (MI->mayLoadOrStore())
1336 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001337 }
1338 return E;
1339}
1340
1341MachineBasicBlock::iterator
Chad Rosier2dfd3542015-09-23 13:51:44 +00001342AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1343 MachineBasicBlock::iterator Update,
1344 bool IsPreIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001345 assert((Update->getOpcode() == AArch64::ADDXri ||
1346 Update->getOpcode() == AArch64::SUBXri) &&
1347 "Unexpected base register update instruction to merge!");
1348 MachineBasicBlock::iterator NextI = I;
1349 // Return the instruction following the merged instruction, which is
1350 // the instruction following our unmerged load, unless that's the add/sub
1351 // instruction we're merging, in which case it's the one after that.
1352 if (++NextI == Update)
1353 ++NextI;
1354
1355 int Value = Update->getOperand(2).getImm();
1356 assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
Chad Rosier2dfd3542015-09-23 13:51:44 +00001357 "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
Tim Northover3b0846e2014-05-24 12:50:23 +00001358 if (Update->getOpcode() == AArch64::SUBXri)
1359 Value = -Value;
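// For example (a sketch, with x20 as the base register): merging
//   ldr x0, [x20]
//   sub x20, x20, #16
// as a post-index update yields "ldr x0, [x20], #-16", i.e. the SUB is
// folded as a negative writeback value.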
1360
Chad Rosier2dfd3542015-09-23 13:51:44 +00001361 unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1362 : getPostIndexedOpcode(I->getOpcode());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001363 MachineInstrBuilder MIB;
1364 if (!isPairedLdSt(I)) {
1365 // Non-paired instruction.
1366 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1367 .addOperand(getLdStRegOp(Update))
1368 .addOperand(getLdStRegOp(I))
1369 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001370 .addImm(Value)
1371 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001372 } else {
1373 // Paired instruction.
Chad Rosier32d4d372015-09-29 16:07:32 +00001374 int Scale = getMemScale(I);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001375 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1376 .addOperand(getLdStRegOp(Update))
1377 .addOperand(getLdStRegOp(I, 0))
1378 .addOperand(getLdStRegOp(I, 1))
1379 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001380 .addImm(Value / Scale)
1381 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001382 }
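// For example (a sketch): folding "add x2, x2, #32" into "ldp x0, x1, [x2]"
// builds the writeback form with an immediate operand of 32 / 8 = 4, since
// the update amount is divided by the access size for paired instructions.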
Tim Northover3b0846e2014-05-24 12:50:23 +00001383 (void)MIB;
1384
Chad Rosier2dfd3542015-09-23 13:51:44 +00001385 if (IsPreIdx)
1386 DEBUG(dbgs() << "Creating pre-indexed load/store.");
1387 else
1388 DEBUG(dbgs() << "Creating post-indexed load/store.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001389 DEBUG(dbgs() << " Replacing instructions:\n ");
1390 DEBUG(I->print(dbgs()));
1391 DEBUG(dbgs() << " ");
1392 DEBUG(Update->print(dbgs()));
1393 DEBUG(dbgs() << " with instruction:\n ");
1394 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1395 DEBUG(dbgs() << "\n");
1396
1397 // Erase the old instructions for the block.
1398 I->eraseFromParent();
1399 Update->eraseFromParent();
1400
1401 return NextI;
1402}
1403
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001404bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
1405 MachineInstr *MI,
1406 unsigned BaseReg, int Offset) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001407 switch (MI->getOpcode()) {
1408 default:
1409 break;
1410 case AArch64::SUBXri:
1411 // Negate the offset for a SUB instruction.
1412 Offset *= -1;
1413 // FALLTHROUGH
1414 case AArch64::ADDXri:
1415 // Make sure it's a vanilla immediate operand, not a relocation or
1416 // anything else we can't handle.
1417 if (!MI->getOperand(2).isImm())
1418 break;
1419 // Watch out for 1 << 12 shifted value.
1420 if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
1421 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001422
1423 // The update instruction source and destination register must be the
1424 // same as the load/store base register.
1425 if (MI->getOperand(0).getReg() != BaseReg ||
1426 MI->getOperand(1).getReg() != BaseReg)
1427 break;
1428
1429 bool IsPairedInsn = isPairedLdSt(MemMI);
1430 int UpdateOffset = MI->getOperand(2).getImm();
1431 // For non-paired load/store instructions, the immediate must fit in a
1432 // signed 9-bit integer.
1433 if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
1434 break;
1435
1436 // For paired load/store instructions, the immediate must be a multiple of
1437 // the scaling factor. The scaled offset must also fit into a signed 7-bit
1438 // integer.
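// For example, a hypothetical "add x2, x2, #24" can be merged into an LDP of
// 64-bit registers (24 is a multiple of 8 and 24 / 8 = 3 fits the signed
// 7-bit range), whereas "add x2, x2, #20" cannot, since 20 is not a multiple
// of the 8-byte access size.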
1439 if (IsPairedInsn) {
Chad Rosier32d4d372015-09-29 16:07:32 +00001440 int Scale = getMemScale(MemMI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001441 if (UpdateOffset % Scale != 0)
1442 break;
1443
1444 int ScaledOffset = UpdateOffset / Scale;
1445 if (ScaledOffset > 64 || ScaledOffset < -64)
1446 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001447 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001448
1449 // If we have a non-zero Offset, check that it matches the amount we're
1450 // adding to the register; a zero Offset accepts any update amount.
1451 if (!Offset || Offset == MI->getOperand(2).getImm())
1452 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001453 break;
1454 }
1455 return false;
1456}
1457
1458MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001459 MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001460 MachineBasicBlock::iterator E = I->getParent()->end();
1461 MachineInstr *MemMI = I;
1462 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001463
Chad Rosierf77e9092015-08-06 15:50:12 +00001464 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001465 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001466
Chad Rosierb7c5b912015-10-01 13:43:05 +00001467 // Scan forward looking for post-index opportunities. Updating instructions
1468 // can't be formed if the memory instruction doesn't have the offset we're
1469 // looking for.
1470 if (MIUnscaledOffset != UnscaledOffset)
1471 return E;
1472
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001473 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001474 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001475 bool IsPairedInsn = isPairedLdSt(MemMI);
1476 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1477 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1478 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1479 return E;
1480 }
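// For example, for a hypothetical "ldr x0, [x0]" no update can be folded:
// the destination overlaps the base register, so a writeback form would
// clash with the loaded result.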
Tim Northover3b0846e2014-05-24 12:50:23 +00001481
Tim Northover3b0846e2014-05-24 12:50:23 +00001482 // Track which registers have been modified and used between the first insn
1483 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001484 ModifiedRegs.reset();
1485 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001486 ++MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001487 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001488 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001489 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001490 if (MI->isDebugValue())
1491 continue;
1492
Chad Rosier35706ad2016-02-04 21:26:02 +00001493 // Now that we know this is a real instruction, count it.
1494 ++Count;
1495
Tim Northover3b0846e2014-05-24 12:50:23 +00001496 // If we found a match, return it.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001497 if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001498 return MBBI;
1499
1500 // Update the status of what the instruction clobbered and used.
1501 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1502
1503 // Otherwise, if the base register is used or modified, we have no match, so
1504 // return early.
1505 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1506 return E;
1507 }
1508 return E;
1509}
1510
1511MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001512 MachineBasicBlock::iterator I, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001513 MachineBasicBlock::iterator B = I->getParent()->begin();
1514 MachineBasicBlock::iterator E = I->getParent()->end();
1515 MachineInstr *MemMI = I;
1516 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001517
Chad Rosierf77e9092015-08-06 15:50:12 +00001518 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1519 int Offset = getLdStOffsetOp(MemMI).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001520
1521 // If the load/store is the first instruction in the block, there's obviously
1522 // not any matching update. Ditto if the memory offset isn't zero.
1523 if (MBBI == B || Offset != 0)
1524 return E;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001525 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001526 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001527 bool IsPairedInsn = isPairedLdSt(MemMI);
1528 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1529 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1530 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1531 return E;
1532 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001533
1534 // Track which registers have been modified and used between the first insn
1535 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001536 ModifiedRegs.reset();
1537 UsedRegs.reset();
Geoff Berry173b14d2016-02-09 20:47:21 +00001538 unsigned Count = 0;
1539 do {
1540 --MBBI;
Tim Northover3b0846e2014-05-24 12:50:23 +00001541 MachineInstr *MI = MBBI;
Tim Northover3b0846e2014-05-24 12:50:23 +00001542
Geoff Berry173b14d2016-02-09 20:47:21 +00001543 // Don't count DBG_VALUE instructions towards the search limit.
1544 if (!MI->isDebugValue())
1545 ++Count;
Chad Rosier35706ad2016-02-04 21:26:02 +00001546
Tim Northover3b0846e2014-05-24 12:50:23 +00001547 // If we found a match, return it.
Chad Rosier11c825f2015-09-30 19:44:40 +00001548 if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001549 return MBBI;
1550
1551 // Update the status of what the instruction clobbered and used.
1552 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1553
1554 // Otherwise, if the base register is used or modified, we have no match, so
1555 // return early.
1556 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1557 return E;
Geoff Berry173b14d2016-02-09 20:47:21 +00001558 } while (MBBI != B && Count < Limit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001559 return E;
1560}
1561
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001562bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
1563 MachineBasicBlock::iterator &MBBI) {
1564 MachineInstr *MI = MBBI;
1565 // If this is a volatile load, don't mess with it.
1566 if (MI->hasOrderedMemoryRef())
1567 return false;
1568
1569 // Make sure this is a reg+imm.
1570 // FIXME: It is possible to extend it to handle reg+reg cases.
1571 if (!getLdStOffsetOp(MI).isImm())
1572 return false;
1573
Chad Rosier35706ad2016-02-04 21:26:02 +00001574 // Look backward up to LdStLimit instructions.
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001575 MachineBasicBlock::iterator StoreI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001576 if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001577 ++NumLoadsFromStoresPromoted;
1578 // Promote the load. Keeping the iterator straight is a pain, so we let
1579 // the merge routine tell us what the next instruction is after it's done
1580 // mucking about.
1581 MBBI = promoteLoadFromStore(MBBI, StoreI);
1582 return true;
1583 }
1584 return false;
1585}
1586
Chad Rosier24c46ad2016-02-09 18:10:20 +00001587// Find narrow loads that can be converted into a single wider load with
1588// bitfield extract instructions. Also merge adjacent zero stores into a wider
1589// store.
1590bool AArch64LoadStoreOpt::tryToMergeLdStInst(
1591 MachineBasicBlock::iterator &MBBI) {
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001592 assert((isNarrowLoad(MBBI) || isPromotableZeroStoreOpcode(MBBI)) &&
1593 "Expected narrow op.");
Chad Rosier24c46ad2016-02-09 18:10:20 +00001594 MachineInstr *MI = MBBI;
1595 MachineBasicBlock::iterator E = MI->getParent()->end();
1596
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001597 if (!TII->isCandidateToMergeOrPair(MI))
Chad Rosier24c46ad2016-02-09 18:10:20 +00001598 return false;
1599
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001600 // For promotable zero stores, the stored value should be WZR.
1601 if (isPromotableZeroStoreOpcode(MI) &&
1602 getLdStRegOp(MI).getReg() != AArch64::WZR)
Chad Rosierf7cd8ea2016-02-09 21:20:12 +00001603 return false;
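// For example, a hypothetical "strh w1, [x0]" is rejected here even if an
// adjacent zero store follows; only stores of WZR are candidates for
// widening into a single wider zero store.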
1604
Chad Rosier24c46ad2016-02-09 18:10:20 +00001605 // Look ahead up to LdStLimit instructions for a mergeable instruction.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001606 LdStPairFlags Flags;
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001607 MachineBasicBlock::iterator MergeMI =
1608 findMatchingInsn(MBBI, Flags, LdStLimit);
Chad Rosierd7363db2016-02-09 19:09:22 +00001609 if (MergeMI != E) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001610 if (isNarrowLoad(MI)) {
1611 ++NumNarrowLoadsPromoted;
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001612 } else if (isPromotableZeroStoreInst(MI)) {
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001613 ++NumZeroStoresPromoted;
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001614 }
Chad Rosier24c46ad2016-02-09 18:10:20 +00001615 // Keeping the iterator straight is a pain, so we let the merge routine tell
1616 // us what the next instruction is after it's done mucking about.
Chad Rosierd7363db2016-02-09 19:09:22 +00001617 MBBI = mergeNarrowInsns(MBBI, MergeMI, Flags);
Chad Rosier24c46ad2016-02-09 18:10:20 +00001618 return true;
1619 }
1620 return false;
1621}
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001622
Chad Rosier24c46ad2016-02-09 18:10:20 +00001623// Find loads and stores that can be merged into a single load or store pair
1624// instruction.
1625bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
1626 MachineInstr *MI = MBBI;
1627 MachineBasicBlock::iterator E = MI->getParent()->end();
1628
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001629 if (!TII->isCandidateToMergeOrPair(MI))
Chad Rosier24c46ad2016-02-09 18:10:20 +00001630 return false;
1631
Chad Rosierfc3bf1f2016-02-10 15:52:46 +00001632 // Early exit if the offset cannot possibly match: the pair immediate has
1633 // only 6 bits of positive range, and we allow one extra in case a later
1634 // insn matches with Offset-1.
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001635 bool IsUnscaled = TII->isUnscaledLdSt(MI);
Chad Rosierfc3bf1f2016-02-10 15:52:46 +00001636 int Offset = getLdStOffsetOp(MI).getImm();
1637 int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
1638 if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
1639 return false;
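// For example (a sketch, assuming inBoundsForPair enforces the pair's
// signed 7-bit immediate range): a scaled "ldr x0, [x2, #800]" has a scaled
// offset of 100, which no paired instruction can encode, so there is no
// point scanning forward for a partner.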
1640
Chad Rosier24c46ad2016-02-09 18:10:20 +00001641 // Look ahead up to LdStLimit instructions for a pairable instruction.
1642 LdStPairFlags Flags;
1643 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
1644 if (Paired != E) {
1645 ++NumPairCreated;
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001646 if (TII->isUnscaledLdSt(MI))
Chad Rosier24c46ad2016-02-09 18:10:20 +00001647 ++NumUnscaledPairCreated;
1648 // Keeping the iterator straight is a pain, so we let the merge routine tell
1649 // us what the next instruction is after it's done mucking about.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001650 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1651 return true;
1652 }
1653 return false;
1654}
1655
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001656bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
1657 bool enableNarrowLdOpt) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001658 bool Modified = false;
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001659 // Four transformations to do here:
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001660 // 1) Find loads that directly read from stores and promote them by
1661 // replacing with mov instructions. If the store is wider than the load,
1662 // the load will be replaced with a bitfield extract.
1663 // e.g.,
1664 // str w1, [x0, #4]
1665 // ldrh w2, [x0, #6]
1666 // ; becomes
1667 // str w1, [x0, #4]
1668 // lsr w2, w1, #16
Tim Northover3b0846e2014-05-24 12:50:23 +00001669 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001670 MBBI != E;) {
1671 MachineInstr *MI = MBBI;
1672 switch (MI->getOpcode()) {
1673 default:
1674 // Just move on to the next instruction.
1675 ++MBBI;
1676 break;
1677 // Scaled instructions.
1678 case AArch64::LDRBBui:
1679 case AArch64::LDRHHui:
1680 case AArch64::LDRWui:
1681 case AArch64::LDRXui:
1682 // Unscaled instructions.
1683 case AArch64::LDURBBi:
1684 case AArch64::LDURHHi:
1685 case AArch64::LDURWi:
1686 case AArch64::LDURXi: {
1687 if (tryToPromoteLoadFromStore(MBBI)) {
1688 Modified = true;
1689 break;
1690 }
1691 ++MBBI;
1692 break;
1693 }
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001694 }
1695 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001696 // 2) Find narrow loads that can be converted into a single wider load
1697 // with bitfield extract instructions.
1698 // e.g.,
1699 // ldrh w0, [x2]
1700 // ldrh w1, [x2, #2]
1701 // ; becomes
1702 // ldr w0, [x2]
1703 // ubfx w1, w0, #16, #16
1704 // and w0, w0, #ffff
Jun Bum Lim1de2d442016-02-05 20:02:03 +00001705 //
1706 // Also merge adjacent zero stores into a wider store.
1707 // e.g.,
1708 // strh wzr, [x0]
1709 // strh wzr, [x0, #2]
1710 // ; becomes
1711 // str wzr, [x0]
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001712 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001713 enableNarrowLdOpt && MBBI != E;) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001714 MachineInstr *MI = MBBI;
1715 switch (MI->getOpcode()) {
1716 default:
1717 // Just move on to the next instruction.
1718 ++MBBI;
1719 break;
1720 // Scaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001721 case AArch64::LDRBBui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001722 case AArch64::LDRHHui:
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001723 case AArch64::LDRSBWui:
1724 case AArch64::LDRSHWui:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001725 case AArch64::STRBBui:
1726 case AArch64::STRHHui:
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001727 case AArch64::STRWui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001728 // Unscaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001729 case AArch64::LDURBBi:
1730 case AArch64::LDURHHi:
1731 case AArch64::LDURSBWi:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001732 case AArch64::LDURSHWi:
1733 case AArch64::STURBBi:
Jun Bum Lim397eb7b2016-02-12 15:25:39 +00001734 case AArch64::STURHHi:
1735 case AArch64::STURWi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001736 if (tryToMergeLdStInst(MBBI)) {
1737 Modified = true;
1738 break;
1739 }
1740 ++MBBI;
1741 break;
1742 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001743 }
1744 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001745 // 3) Find loads and stores that can be merged into a single load or store
1746 // pair instruction.
1747 // e.g.,
1748 // ldr x0, [x2]
1749 // ldr x1, [x2, #8]
1750 // ; becomes
1751 // ldp x0, x1, [x2]
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001752 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Tim Northover3b0846e2014-05-24 12:50:23 +00001753 MBBI != E;) {
1754 MachineInstr *MI = MBBI;
1755 switch (MI->getOpcode()) {
1756 default:
1757 // Just move on to the next instruction.
1758 ++MBBI;
1759 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001760 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001761 case AArch64::STRSui:
1762 case AArch64::STRDui:
1763 case AArch64::STRQui:
1764 case AArch64::STRXui:
1765 case AArch64::STRWui:
1766 case AArch64::LDRSui:
1767 case AArch64::LDRDui:
1768 case AArch64::LDRQui:
1769 case AArch64::LDRXui:
1770 case AArch64::LDRWui:
Quentin Colombet29f55332015-01-24 01:25:54 +00001771 case AArch64::LDRSWui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001772 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001773 case AArch64::STURSi:
1774 case AArch64::STURDi:
1775 case AArch64::STURQi:
1776 case AArch64::STURWi:
1777 case AArch64::STURXi:
1778 case AArch64::LDURSi:
1779 case AArch64::LDURDi:
1780 case AArch64::LDURQi:
1781 case AArch64::LDURWi:
Quentin Colombet29f55332015-01-24 01:25:54 +00001782 case AArch64::LDURXi:
1783 case AArch64::LDURSWi: {
Chad Rosier24c46ad2016-02-09 18:10:20 +00001784 if (tryToPairLdStInst(MBBI)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001785 Modified = true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001786 break;
1787 }
1788 ++MBBI;
1789 break;
1790 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001791 }
1792 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001793 // 4) Find base register updates that can be merged into the load or store
1794 // as a base-reg writeback.
1795 // e.g.,
1796 // ldr x0, [x2]
1797 // add x2, x2, #4
1798 // ; becomes
1799 // ldr x0, [x2], #4
Tim Northover3b0846e2014-05-24 12:50:23 +00001800 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1801 MBBI != E;) {
1802 MachineInstr *MI = MBBI;
1803 // Do update merging. It's simpler to keep this separate from the above
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001804 // switches, though not strictly necessary.
Matthias Braunfa3872e2015-05-18 20:27:55 +00001805 unsigned Opc = MI->getOpcode();
Tim Northover3b0846e2014-05-24 12:50:23 +00001806 switch (Opc) {
1807 default:
1808 // Just move on to the next instruction.
1809 ++MBBI;
1810 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001811 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001812 case AArch64::STRSui:
1813 case AArch64::STRDui:
1814 case AArch64::STRQui:
1815 case AArch64::STRXui:
1816 case AArch64::STRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001817 case AArch64::STRHHui:
1818 case AArch64::STRBBui:
Tim Northover3b0846e2014-05-24 12:50:23 +00001819 case AArch64::LDRSui:
1820 case AArch64::LDRDui:
1821 case AArch64::LDRQui:
1822 case AArch64::LDRXui:
1823 case AArch64::LDRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001824 case AArch64::LDRHHui:
1825 case AArch64::LDRBBui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001826 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001827 case AArch64::STURSi:
1828 case AArch64::STURDi:
1829 case AArch64::STURQi:
1830 case AArch64::STURWi:
1831 case AArch64::STURXi:
1832 case AArch64::LDURSi:
1833 case AArch64::LDURDi:
1834 case AArch64::LDURQi:
1835 case AArch64::LDURWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001836 case AArch64::LDURXi:
1837 // Paired instructions.
1838 case AArch64::LDPSi:
Chad Rosier43150122015-09-29 20:39:55 +00001839 case AArch64::LDPSWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001840 case AArch64::LDPDi:
1841 case AArch64::LDPQi:
1842 case AArch64::LDPWi:
1843 case AArch64::LDPXi:
1844 case AArch64::STPSi:
1845 case AArch64::STPDi:
1846 case AArch64::STPQi:
1847 case AArch64::STPWi:
1848 case AArch64::STPXi: {
Tim Northover3b0846e2014-05-24 12:50:23 +00001849 // Make sure this is a reg+imm (as opposed to an address reloc).
Chad Rosierf77e9092015-08-06 15:50:12 +00001850 if (!getLdStOffsetOp(MI).isImm()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001851 ++MBBI;
1852 break;
1853 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001854 // Look forward to try to form a post-index instruction. For example,
1855 // ldr x0, [x20]
1856 // add x20, x20, #32
1857 // merged into:
1858 // ldr x0, [x20], #32
Tim Northover3b0846e2014-05-24 12:50:23 +00001859 MachineBasicBlock::iterator Update =
Chad Rosier35706ad2016-02-04 21:26:02 +00001860 findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001861 if (Update != E) {
1862 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001863 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001864 Modified = true;
1865 ++NumPostFolded;
1866 break;
1867 }
1868 // Don't know how to handle pre/post-index versions, so move to the next
1869 // instruction.
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001870 if (TII->isUnscaledLdSt(Opc)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001871 ++MBBI;
1872 break;
1873 }
1874
1875 // Look back to try to find a pre-index instruction. For example,
1876 // add x0, x0, #8
1877 // ldr x1, [x0]
1878 // merged into:
1879 // ldr x1, [x0, #8]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001880 Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001881 if (Update != E) {
1882 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001883 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001884 Modified = true;
1885 ++NumPreFolded;
1886 break;
1887 }
Chad Rosier7a83d772015-10-01 13:09:44 +00001888 // The immediate in the load/store is scaled by the size of the memory
1889 // operation. The immediate in the add we're looking for,
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001890 // however, is not, so adjust here.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001891 int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001892
Tim Northover3b0846e2014-05-24 12:50:23 +00001893 // Look forward to try to find a pre-index instruction. For example,
1894 // ldr x1, [x0, #64]
1895 // add x0, x0, #64
1896 // merged into:
1897 // ldr x1, [x0, #64]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001898 Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001899 if (Update != E) {
1900 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001901 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001902 Modified = true;
1903 ++NumPreFolded;
1904 break;
1905 }
1906
1907 // Nothing found. Just move to the next instruction.
1908 ++MBBI;
1909 break;
1910 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001911 }
1912 }
1913
1914 return Modified;
1915}
1916
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001917bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
Chad Rosiercd2be7f2016-02-12 15:51:51 +00001918 bool ProfitableArch = Subtarget->isCortexA57() || Subtarget->isKryo();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001919 // FIXME: The benefit from converting narrow loads into a wider load could be
1920 // microarchitectural as it assumes that a single load with two bitfield
1921 // extracts is cheaper than two narrow loads. Currently, this conversion is
1922 // enabled only on Cortex-A57 and Kryo, the targets considered profitable.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001923 return ProfitableArch && !Subtarget->requiresStrictAlign();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001924}
1925
Tim Northover3b0846e2014-05-24 12:50:23 +00001926bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
Oliver Stannardd414c992015-11-10 11:04:18 +00001927 Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
1928 TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
1929 TRI = Subtarget->getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +00001930
Chad Rosierbba881e2016-02-02 15:02:30 +00001931 // Resize the modified and used register bitfield trackers. We do this once
1932 // per function and then clear the bitfield each time we optimize a load or
1933 // store.
1934 ModifiedRegs.resize(TRI->getNumRegs());
1935 UsedRegs.resize(TRI->getNumRegs());
1936
Tim Northover3b0846e2014-05-24 12:50:23 +00001937 bool Modified = false;
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001938 bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
Tim Northover3b0846e2014-05-24 12:50:23 +00001939 for (auto &MBB : Fn)
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001940 Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001941
1942 return Modified;
1943}
1944
1945// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
1946// loads and stores near one another?
1947
Chad Rosier3f8b09d2016-02-09 19:42:19 +00001948// FIXME: When pairing store instructions it's very possible for this pass to
1949// hoist a store with a KILL marker above another use (without a KILL marker).
1950// The resulting IR is invalid, but nothing uses the KILL markers after this
1951// pass, so it's never caused a problem in practice.
1952
Chad Rosier43f5c842015-08-05 12:40:13 +00001953/// createAArch64LoadStoreOptimizationPass - returns an instance of the
1954/// load / store optimization pass.
Tim Northover3b0846e2014-05-24 12:50:23 +00001955FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
1956 return new AArch64LoadStoreOpt();
1957}