//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false for the reverse order.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote a load that reads directly from the address just stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from a preceding
  // store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Decide whether merging two narrow loads into a single wider load with
  // bitfield extracts should be enabled for this function.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

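// Returns true if Opc is an unscaled (LDUR/STUR-style) load or store, i.e.,
// one whose immediate is a signed 9-bit byte offset rather than a scaled,
// unsigned 12-bit one.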
static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

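// Return the bitfield-extract opcode used to split the value of a merged
// wide load back into its narrow parts: UBFM for zero-extending loads,
// SBFM for sign-extending ones.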
static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

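// Narrow (byte or halfword) stores are candidates for being widened when
// they store the zero register.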
static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowStore(MachineInstr *MI) {
  return isNarrowStore(MI->getOpcode());
}

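// Narrow (byte or halfword) loads are candidates for being merged into a
// single wider load followed by bitfield extracts.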
static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

// Size in bytes of the memory access performed by MI. This is also the
// factor by which the immediate of a scaled load/store is multiplied to get
// the byte offset.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

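// Map a sign-extending load to its zero-extending (non-SExt) counterpart so
// that candidate instructions can be compared modulo the extension; other
// valid load/store opcodes are returned unchanged.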
static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

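// Return the opcode of the load/store with twice the width of Opc, used
// when two adjacent narrow accesses are merged into a single wider one.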
static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

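// Return the paired (LDP/STP) opcode that corresponds to a single load or
// store opcode.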
static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

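// Return true if StoreInst uses the same addressing form (scaled or
// unscaled) as LoadInst and writes at least as many bytes as LoadInst
// reads, making it a candidate for store-to-load forwarding.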
static bool isMatchingStore(MachineInstr *LoadInst,
                            MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

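// Return the pre-indexed (writeback before the access) form of Opc.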
static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

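// Return the post-indexed (writeback after the access) form of Opc.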
static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

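// Returns true if MI is already a paired (LDP/STP) load or store.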
static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

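// Operand accessors. Paired instructions carry two register operands ahead
// of the base and offset, so the operand indices differ from those of the
// single-register forms.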
static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

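// Returns true if the bytes read by LoadInst lie entirely within the bytes
// written by StoreInst. Offsets are normalized to unscaled byte offsets
// before comparing.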
static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

630AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
631 MachineBasicBlock::iterator Paired,
Chad Rosier96a18a92015-07-21 17:42:04 +0000632 const LdStPairFlags &Flags) {
Tim Northover3b0846e2014-05-24 12:50:23 +0000633 MachineBasicBlock::iterator NextI = I;
634 ++NextI;
635 // If NextI is the second of the two instructions to be merged, we need
636 // to skip one further. Either way we merge will invalidate the iterator,
637 // and we don't need to scan the new instruction, as it's a pairwise
638 // instruction, which we're not considering for further action anyway.
639 if (NextI == Paired)
640 ++NextI;
641
Chad Rosier96a18a92015-07-21 17:42:04 +0000642 int SExtIdx = Flags.getSExtIdx();
Quentin Colombet66b61632015-03-06 22:42:10 +0000643 unsigned Opc =
644 SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
Chad Rosier22eb7102015-08-06 17:37:18 +0000645 bool IsUnscaled = isUnscaledLdSt(Opc);
Chad Rosierf11d0402015-10-01 18:17:12 +0000646 int OffsetStride = IsUnscaled ? getMemScale(I) : 1;
Tim Northover3b0846e2014-05-24 12:50:23 +0000647
Chad Rosier96a18a92015-07-21 17:42:04 +0000648 bool MergeForward = Flags.getMergeForward();
Tim Northover3b0846e2014-05-24 12:50:23 +0000649 // Insert our new paired instruction after whichever of the paired
Tilmann Scheller4aad3bd2014-06-04 12:36:28 +0000650 // instructions MergeForward indicates.
651 MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
652 // Also based on MergeForward is from where we copy the base register operand
Tim Northover3b0846e2014-05-24 12:50:23 +0000653 // so we get the flags compatible with the input code.
Chad Rosierf77e9092015-08-06 15:50:12 +0000654 const MachineOperand &BaseRegOp =
655 MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);
Tim Northover3b0846e2014-05-24 12:50:23 +0000656
657 // Which register is Rt and which is Rt2 depends on the offset order.
658 MachineInstr *RtMI, *Rt2MI;
Renato Golin6274e522016-02-05 12:14:30 +0000659 if (getLdStOffsetOp(I).getImm() ==
660 getLdStOffsetOp(Paired).getImm() + OffsetStride) {
Tim Northover3b0846e2014-05-24 12:50:23 +0000661 RtMI = Paired;
662 Rt2MI = I;
Quentin Colombet66b61632015-03-06 22:42:10 +0000663 // Here we swapped the assumption made for SExtIdx.
664 // I.e., we turn ldp I, Paired into ldp Paired, I.
665 // Update the index accordingly.
666 if (SExtIdx != -1)
667 SExtIdx = (SExtIdx + 1) % 2;
Tim Northover3b0846e2014-05-24 12:50:23 +0000668 } else {
669 RtMI = I;
670 Rt2MI = Paired;
671 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +0000672
James Molloy5b18b4c2015-10-23 10:41:38 +0000673 int OffsetImm = getLdStOffsetOp(RtMI).getImm();
Jun Bum Limc9879ec2015-10-27 19:16:03 +0000674
  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(getMatchingWideOpcode(Opc)))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm)
                   .setMemRefs(I->mergeMemRefsWith(*Paired));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  if (isNarrowStore(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(getMatchingWideOpcode(Opc)))
              .addOperand(getLdStRegOp(I))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm)
              .setMemRefs(I->mergeMemRefsWith(*Paired));
  } else {
    // Scale the byte offset of an unscaled input down to element units.
    if (IsUnscaled)
      OffsetImm /= OffsetStride;
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(getMatchingPairOpcode(Opc)))
              .addOperand(getLdStRegOp(RtMI))
              .addOperand(getLdStRegOp(Rt2MI))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm);
  }

  (void)MIB;

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

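// For example, with a preceding
//   str w1, [x2]
// the load
//   ldr w0, [x2]
// is replaced by
//   mov w0, w1
// and a narrower load contained within the stored bytes becomes a bitfield
// extract of the stored register instead.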
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(LoadI);
  int StoreSize = getMemScale(StoreI);
  unsigned LdRt = getLdStRegOp(LoadI).getReg();
  unsigned StRt = getLdStRegOp(StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if its destination register is the same as the register
    // holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are of the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = isUnscaledLdSt(LoadI);
    assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) | // N
                                (Immr << 6) |                   // immr
                                (Imms << 0);                    // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

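// Returns true when the offset, once converted to element units, fits the
// 7-bit signed immediate field of a paired load/store.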
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled)
    Offset /= OffsetStride;

  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator E = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *LoadMI = I;
  unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // FIXME: We miss the case where the matching store is the first instruction
  // in the basic block.
  for (unsigned Count = 0; MBBI != E && Count < Limit;) {
    --MBBI;
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;
    // Now that we know this is a real instruction, count it.
    ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(LoadMI, MI) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI->isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI->mayStore() && mayAlias(LoadMI, MI, TII))
      return false;
  }
  return false;
}

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  bool IsNarrowStore = isNarrowStore(Opc);

  // For narrow stores, find only the case where the stored value is WZR.
  if (IsNarrowStore && Reg != AArch64::WZR)
    return E;

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    bool CanMergeOpc = Opc == MI->getOpcode();
    Flags.setSExtIdx(-1);
    if (!CanMergeOpc) {
      bool IsValidLdStrOpc;
      unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
      assert(IsValidLdStrOpc &&
             "Given Opc should be a Load or Store with an immediate");
      // Opc will be the first instruction in the pair.
      Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
      CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
    }

    if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop looking
        // as something is going on that we don't have enough information to
        // safely transform. Similarly, stop if we see a hint to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;
        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        bool MIIsUnscaled = isUnscaledLdSt(MI);
        bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
        if (!IsNarrowLoad &&
            !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        if (IsNarrowLoad || IsNarrowStore) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow
          // input, bail and keep looking.
          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        } else {
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        // For narrow stores, allow only when the stored value is the same
        // (i.e., WZR).
        if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
            (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI->isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI->mayLoadOrStore())
      MemInsns.push_back(MI);
  }
  return E;
}

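// For example, a post-index update folds
//   ldr x0, [x2]
//   add x2, x2, #4
// into
//   ldr x0, [x2], #4
// and a pre-index update folds the add into "ldr x0, [x2, #4]!".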
1250MachineBasicBlock::iterator
Chad Rosier2dfd3542015-09-23 13:51:44 +00001251AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1252 MachineBasicBlock::iterator Update,
1253 bool IsPreIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001254 assert((Update->getOpcode() == AArch64::ADDXri ||
1255 Update->getOpcode() == AArch64::SUBXri) &&
1256 "Unexpected base register update instruction to merge!");
1257 MachineBasicBlock::iterator NextI = I;
1258 // Return the instruction following the merged instruction, which is
1259 // the instruction following our unmerged load. Unless that's the add/sub
1260 // instruction we're merging, in which case it's the one after that.
1261 if (++NextI == Update)
1262 ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;
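  // e.g., "sub x2, x2, #16" contributes a writeback offset of -16.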

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  } else {
    // Paired instruction.
    int Scale = getMemScale(I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I, 0))
              .addOperand(getLdStRegOp(I, 1))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  }
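  // Note the paired forms take a scaled immediate, e.g. a byte offset of 16
  // on a 64-bit ldp/stp is encoded as 16 / 8 = 2.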
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
                                               MachineInstr *MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
    // Negate the offset for a SUB instruction.
    Offset *= -1;
    // FALLTHROUGH
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI->getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI->getOperand(0).getReg() != BaseReg ||
        MI->getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI->getOperand(2).getImm();
    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;
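    // e.g., "add x1, x1, #256" can't be folded into a pre-/post-indexed form,
    // whose largest encodable immediate is #255.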

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor. The scaled offset must also fit into a signed 7-bit
    // integer.
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }
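    // e.g., for a 64-bit ldp/stp (scale 8) an update of #8 (scaled offset 1)
    // is mergeable, but #12 isn't a multiple of the scale and #520 (scaled
    // offset 65) is out of range.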

    // If we have a non-zero Offset, check that it matches the amount we're
    // adding to the register. A zero Offset matches any update amount.
    if (!Offset || Offset == MI->getOperand(2).getImm())
      return true;
    break;
  }
  return false;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }
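  // e.g., "ldr x0, [x0]" can't take a writeback, since the resulting
  // "ldr x0, [x0], #8" (base == destination) is UNPREDICTABLE.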

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // no matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
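  // (In a pre-indexed form the written-back base equals the address accessed,
  // so the merge is only sound when the load/store's own offset is zero.)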
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();
  --MBBI;
  for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = MBBI;
  // If this is a volatile load, don't mess with it.
  if (MI->hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to LdStLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::tryToMergeLdStInst(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = MBBI;
  MachineBasicBlock::iterator E = MI->getParent()->end();
  // If this is a volatile load/store, don't mess with it.
  if (MI->hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (TII->isLdStPairSuppressed(MI))
    return false;

  // Look ahead up to LdStLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
  if (Paired != E) {
    if (isNarrowLoad(MI)) {
      ++NumNarrowLoadsPromoted;
    } else if (isNarrowStore(MI)) {
      ++NumZeroStoresPromoted;
    } else {
      ++NumPairCreated;
      if (isUnscaledLdSt(MI))
        ++NumUnscaledPairCreated;
    }

    // Merge the loads into a pair. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool EnableNarrowLdOpt) {
  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRWui:
    case AArch64::LDRXui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURWi:
    case AArch64::LDURXi: {
      if (tryToPromoteLoadFromStore(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
    }
  }
  // 2) Find narrow loads that can be converted into a single wider load
  //    with bitfield extract instructions.
  //      e.g.,
  //        ldrh w0, [x2]
  //        ldrh w1, [x2, #2]
  //        ; becomes
  //        ldr w0, [x2]
  //        ubfx w1, w0, #16, #16
  //        and w0, w0, #0xffff
  //
  //    Also merge adjacent zero stores into a wider store.
  //      e.g.,
  //        strh wzr, [x0]
  //        strh wzr, [x0, #2]
  //        ; becomes
  //        str wzr, [x0]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       EnableNarrowLdOpt && MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRSBWui:
    case AArch64::LDRSHWui:
    case AArch64::STRBBui:
    case AArch64::STRHHui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURSBWi:
    case AArch64::LDURSHWi:
    case AArch64::STURBBi:
    case AArch64::STURHHi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
    }
  }
  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRSWui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    case AArch64::LDURSWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
    }
  }
  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switches, though not strictly necessary.
    unsigned Opc = MI->getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::STRHHui:
    case AArch64::STRBBui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRHHui:
    case AArch64::LDRBBui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    // Paired instructions.
    case AArch64::LDPSi:
    case AArch64::LDPSWi:
    case AArch64::LDPDi:
    case AArch64::LDPQi:
    case AArch64::LDPWi:
    case AArch64::LDPXi:
    case AArch64::STPSi:
    case AArch64::STPDi:
    case AArch64::STPQi:
    case AArch64::STPWi:
    case AArch64::STPXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Look forward to try to form a post-index instruction. For example,
      //   ldr x0, [x20]
      //   add x20, x20, #32
      // merged into:
      //   ldr x0, [x20], #32
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
        Modified = true;
        ++NumPostFolded;
        break;
      }
      // Don't know how to handle pre/post-index versions, so move to the next
      // instruction.
      if (isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      //   add x0, x0, #8
      //   ldr x1, [x0]
      // merged into:
      //   ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }
      // The immediate in the load/store is scaled by the size of the memory
      // operation. The immediate in the add we're looking for,
      // however, is not, so adjust here.
      int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
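      // e.g., a scaled "ldr x1, [x0, #64]" carries an immediate operand of 8;
      // with a memory scale of 8, the byte displacement to search for is 64.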

      // Look forward to try to find a pre-index instruction. For example,
      //   ldr x1, [x0, #64]
      //   add x0, x0, #64
      // merged into:
      //   ldr x1, [x0, #64]!
      Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
    }
  }

  return Modified;
}

bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
  bool ProfitableArch = Subtarget->isCortexA57();
  // FIXME: The benefit from converting narrow loads into a wider load could be
  // microarchitectural, as it assumes that a single load with two bitfield
  // extracts is cheaper than two narrow loads. Currently, this conversion is
  // enabled only on Cortex-A57, where the performance benefit was verified.
  return ProfitableArch && !Subtarget->requiresStrictAlign();
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();

  // Resize the modified and used register bitfield trackers. We do this once
  // per function and then clear the bitfield each time we optimize a load or
  // store.
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
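  // (The target's register count is fixed, so the trackers never need to
  // grow after this.)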

  bool Modified = false;
  bool EnableNarrowLdOpt = enableNarrowLdMerge(Fn);
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, EnableNarrowLdOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
// loads and stores near one another?

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}