//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pair instructions generated from unscaled "
          "load/stores");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we
// form pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Is this a candidate for ld/st merging or pairing? For example, we don't
  // touch volatiles or load/stores that have a hint to avoid pair formation.
  bool isCandidateToMergeOrPair(MachineInstr *MI);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from a store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Check whether converting two narrow loads into a single wider load with
  // bitfield extracts should be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

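// Return true for the unscaled (LDUR/STUR) load/store variants, which take a
// signed 9-bit byte offset rather than a scaled unsigned immediate.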
static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

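// Return the W-register bitfield-extract opcode used to recover one of the
// original narrow values from a merged wide load: sign-extending loads use
// SBFMWri, zero-extending loads use UBFMWri.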
static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowStore(MachineInstr *MI) {
  return isNarrowStore(MI->getOpcode());
}

static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

// Return the size in bytes of the memory access performed by the load/store;
// this is also the scaling factor applied to scaled immediate offsets.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

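// Map a sign-extending load opcode to its non-extending equivalent; opcodes
// that are already non-extending map to themselves. For opcodes this pass
// does not handle, return UINT_MAX and clear *IsValidLdStrOpc if provided.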
static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

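// Return the load/store opcode with twice the access width of Opc; used when
// two adjacent narrow accesses are merged into a single wider one.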
static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

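// Return true if the store writes at least as many bytes as the load reads
// and uses the same addressing form (scaled vs. unscaled), so the loaded
// value can be recovered from the stored register.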
static unsigned isMatchingStore(MachineInstr *LoadInst,
                                MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

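// Operand accessors. Paired instructions (LDP/STP) carry two register
// operands before the base register and immediate offset; single load/store
// instructions carry one.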
static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

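// Return true if the byte range read by LoadInst lies entirely within the
// byte range written by StoreInst. Offsets are compared in unscaled (byte)
// units.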
static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(Paired).getImm() + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();

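  // When merging two adjacent narrow loads, emit a single load of twice the
  // width and recreate the two original values with bitfield extracts (or an
  // AND for the zero-extended low half).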
  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(getMatchingWideOpcode(Opc)))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm)
                   .setMemRefs(I->mergeMemRefsWith(*Paired));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  if (isNarrowStore(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(getMatchingWideOpcode(Opc)))
              .addOperand(getLdStRegOp(I))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm)
              .setMemRefs(I->mergeMemRefsWith(*Paired));
  } else {
    // Handle Unscaled.
    if (IsUnscaled)
      OffsetImm /= OffsetStride;
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(getMatchingPairOpcode(Opc)))
              .addOperand(getLdStRegOp(RtMI))
              .addOperand(getLdStRegOp(Rt2MI))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm);
  }

  (void)MIB;

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(LoadI);
  int StoreSize = getMemScale(StoreI);
  unsigned LdRt = getLdStRegOp(LoadI).getReg();
  unsigned StRt = getLdStRegOp(StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if its destination register is the same as the register
    // holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store have the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = isUnscaledLdSt(LoadI);
    assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0);              // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

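// Paired instructions have a 7-bit signed, scaled immediate, so the combined
// offset must fit in the element range [-64, 63].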
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled)
    Offset /= OffsetStride;

  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

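// Conservative alias check: treat the two instructions as aliasing when at
// least one of them writes memory and TII cannot prove that the accesses are
// trivially disjoint.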
static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator E = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *LoadMI = I;
  unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // FIXME: We miss the case where the matching store is the first instruction
  // in the basic block.
  for (unsigned Count = 0; MBBI != E && Count < Limit;) {
    --MBBI;
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;
    // Now that we know this is a real instruction, count it.
    ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(LoadMI, MI) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI->isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI->mayStore() && mayAlias(LoadMI, MI, TII))
      return false;
  }
  return false;
}

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  bool IsNarrowStore = isNarrowStore(Opc);

  // For narrow stores, find only the case where the stored value is WZR.
  if (IsNarrowStore && Reg != AArch64::WZR)
    return E;

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    bool CanMergeOpc = Opc == MI->getOpcode();
    Flags.setSExtIdx(-1);
    if (!CanMergeOpc) {
      bool IsValidLdStrOpc;
      unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
      assert(IsValidLdStrOpc &&
             "Given Opc should be a Load or Store with an immediate");
      // Opc will be the first instruction in the pair.
      Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
      CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
    }

    if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop
        // looking as something is going on that we don't have enough
        // information to safely transform. Similarly, stop if we see a hint
        // to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;
        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        bool MIIsUnscaled = isUnscaledLdSt(MI);
        bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
        if (!IsNarrowLoad &&
            !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        if (IsNarrowLoad || IsNarrowStore) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow
          // input, bail and keep looking.
          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        } else {
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        // For narrow stores, allow only when the stored value is the same
        // (i.e., WZR).
        if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
            (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between
        // the first and the second alias with the first, we can combine the
        // first into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI->isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI->mayLoadOrStore())
      MemInsns.push_back(MI);
  }
  return E;
}
1256
1257MachineBasicBlock::iterator
Chad Rosier2dfd3542015-09-23 13:51:44 +00001258AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1259 MachineBasicBlock::iterator Update,
1260 bool IsPreIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001261 assert((Update->getOpcode() == AArch64::ADDXri ||
1262 Update->getOpcode() == AArch64::SUBXri) &&
1263 "Unexpected base register update instruction to merge!");
1264 MachineBasicBlock::iterator NextI = I;
1265 // Return the instruction following the merged instruction, which is
1266 // the instruction following our unmerged load. Unless that's the add/sub
1267 // instruction we're merging, in which case it's the one after that.
1268 if (++NextI == Update)
1269 ++NextI;
1270
1271 int Value = Update->getOperand(2).getImm();
1272 assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
Chad Rosier2dfd3542015-09-23 13:51:44 +00001273 "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
Tim Northover3b0846e2014-05-24 12:50:23 +00001274 if (Update->getOpcode() == AArch64::SUBXri)
1275 Value = -Value;
1276
Chad Rosier2dfd3542015-09-23 13:51:44 +00001277 unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1278 : getPostIndexedOpcode(I->getOpcode());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001279 MachineInstrBuilder MIB;
1280 if (!isPairedLdSt(I)) {
1281 // Non-paired instruction.
1282 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1283 .addOperand(getLdStRegOp(Update))
1284 .addOperand(getLdStRegOp(I))
1285 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001286 .addImm(Value)
1287 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001288 } else {
1289 // Paired instruction.
Chad Rosier32d4d372015-09-29 16:07:32 +00001290 int Scale = getMemScale(I);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001291 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1292 .addOperand(getLdStRegOp(Update))
1293 .addOperand(getLdStRegOp(I, 0))
1294 .addOperand(getLdStRegOp(I, 1))
1295 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001296 .addImm(Value / Scale)
1297 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001298 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001299 (void)MIB;
1300
Chad Rosier2dfd3542015-09-23 13:51:44 +00001301 if (IsPreIdx)
1302 DEBUG(dbgs() << "Creating pre-indexed load/store.");
1303 else
1304 DEBUG(dbgs() << "Creating post-indexed load/store.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001305 DEBUG(dbgs() << " Replacing instructions:\n ");
1306 DEBUG(I->print(dbgs()));
1307 DEBUG(dbgs() << " ");
1308 DEBUG(Update->print(dbgs()));
1309 DEBUG(dbgs() << " with instruction:\n ");
1310 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1311 DEBUG(dbgs() << "\n");
1312
1313 // Erase the old instructions for the block.
1314 I->eraseFromParent();
1315 Update->eraseFromParent();
1316
1317 return NextI;
1318}
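// A sketch of the paired case handled above (registers and offsets are
// illustrative). The add's byte offset is divided by the access size (8 for
// an X-register pair) to form the writeback immediate:
//   ldp x0, x1, [x2]
//   add x2, x2, #16
//   ; becomes
//   ldp x0, x1, [x2], #16   ; immediate operand encoded as 16 / 8 = 2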
1319
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001320bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
1321 MachineInstr *MI,
1322 unsigned BaseReg, int Offset) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001323 switch (MI->getOpcode()) {
1324 default:
1325 break;
1326 case AArch64::SUBXri:
1327 // Negate the offset for a SUB instruction.
1328 Offset *= -1;
1329 // FALLTHROUGH
1330 case AArch64::ADDXri:
1331 // Make sure it's a vanilla immediate operand, not a relocation or
1332 // anything else we can't handle.
1333 if (!MI->getOperand(2).isImm())
1334 break;
1335 // Watch out for a 1 << 12 shifted value.
1336 if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
1337 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001338
1339 // The update instruction source and destination register must be the
1340 // same as the load/store base register.
1341 if (MI->getOperand(0).getReg() != BaseReg ||
1342 MI->getOperand(1).getReg() != BaseReg)
1343 break;
1344
1345 bool IsPairedInsn = isPairedLdSt(MemMI);
1346 int UpdateOffset = MI->getOperand(2).getImm();
1347 // For non-paired load/store instructions, the immediate must fit in a
1348 // signed 9-bit integer.
1349 if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
1350 break;
1351
1352 // For paired load/store instructions, the immediate must be a multiple of
1353 // the scaling factor. The scaled offset must also fit into a signed 7-bit
1354 // integer.
1355 if (IsPairedInsn) {
Chad Rosier32d4d372015-09-29 16:07:32 +00001356 int Scale = getMemScale(MemMI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001357 if (UpdateOffset % Scale != 0)
1358 break;
1359
1360 int ScaledOffset = UpdateOffset / Scale;
1361 if (ScaledOffset > 63 || ScaledOffset < -64)
1362 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001363 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001364
1365 // If we have a non-zero Offset, check that it matches the amount we're
1366 // adding to the register. A zero Offset matches any update amount.
1367 if (!Offset || Offset == MI->getOperand(2).getImm())
1368 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001369 break;
1370 }
1371 return false;
1372}
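// Sketches of updates this predicate would accept or reject, assuming MemMI
// is "ldr x0, [x2]" (illustrative, not from the original commentary):
//   add x2, x2, #255         ; accepted: fits the signed 9-bit range
//   sub x2, x2, #512         ; rejected: -512 is outside [-256, 255]
//   add x2, x2, #1, lsl #12  ; rejected: shifted immediate
//   add x3, x2, #8           ; rejected: destination is not the base register
// For a paired MemMI such as "ldp x0, x1, [x2]" the offset must additionally
// be a multiple of 8, with the scaled value fitting in 7 signed bits.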
1373
1374MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001375 MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001376 MachineBasicBlock::iterator E = I->getParent()->end();
1377 MachineInstr *MemMI = I;
1378 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001379
Chad Rosierf77e9092015-08-06 15:50:12 +00001380 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001381 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001382
Chad Rosierb7c5b912015-10-01 13:43:05 +00001383 // Scan forward looking for post-index opportunities. Updating instructions
1384 // can't be formed if the memory instruction doesn't have the offset we're
1385 // looking for.
1386 if (MIUnscaledOffset != UnscaledOffset)
1387 return E;
1388
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001389 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001390 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001391 bool IsPairedInsn = isPairedLdSt(MemMI);
1392 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1393 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1394 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1395 return E;
1396 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001397
Tim Northover3b0846e2014-05-24 12:50:23 +00001398 // Track which registers have been modified and used between the first insn
1399 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001400 ModifiedRegs.reset();
1401 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001402 ++MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001403 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001404 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001405 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001406 if (MI->isDebugValue())
1407 continue;
1408
Chad Rosier35706ad2016-02-04 21:26:02 +00001409 // Now that we know this is a real instruction, count it.
1410 ++Count;
1411
Tim Northover3b0846e2014-05-24 12:50:23 +00001412 // If we found a match, return it.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001413 if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001414 return MBBI;
1415
1416 // Update the status of what the instruction clobbered and used.
1417 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1418
1419 // Otherwise, if the base register is used or modified, we have no match, so
1420 // return early.
1421 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1422 return E;
1423 }
1424 return E;
1425}
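// A sketch of the forward scan above with UnscaledOffset == 0 (registers are
// illustrative):
//   ldr x0, [x2]
//   mul x3, x2, x4       ; reads the base register, so the scan stops here
//   add x2, x2, #8       ; never reached; no post-index form is created
// Without the intervening use of x2, the add would be found and folded into
// "ldr x0, [x2], #8".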
1426
1427MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001428 MachineBasicBlock::iterator I, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001429 MachineBasicBlock::iterator B = I->getParent()->begin();
1430 MachineBasicBlock::iterator E = I->getParent()->end();
1431 MachineInstr *MemMI = I;
1432 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001433
Chad Rosierf77e9092015-08-06 15:50:12 +00001434 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1435 int Offset = getLdStOffsetOp(MemMI).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001436
1437 // If the load/store is the first instruction in the block, there's obviously
1438 // not any matching update. Ditto if the memory offset isn't zero.
1439 if (MBBI == B || Offset != 0)
1440 return E;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001441 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001442 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001443 bool IsPairedInsn = isPairedLdSt(MemMI);
1444 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1445 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1446 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1447 return E;
1448 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001449
1450 // Track which registers have been modified and used between the first insn
1451 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001452 ModifiedRegs.reset();
1453 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001454 --MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001455 for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001456 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001457 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001458 if (MI->isDebugValue())
1459 continue;
1460
Chad Rosier35706ad2016-02-04 21:26:02 +00001461 // Now that we know this is a real instruction, count it.
1462 ++Count;
1463
Tim Northover3b0846e2014-05-24 12:50:23 +00001464 // If we found a match, return it.
Chad Rosier11c825f2015-09-30 19:44:40 +00001465 if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001466 return MBBI;
1467
1468 // Update the status of what the instruction clobbered and used.
1469 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1470
1471 // Otherwise, if the base register is used or modified, we have no match, so
1472 // return early.
1473 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1474 return E;
1475 }
1476 return E;
1477}
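// A sketch of the destination-overlap restriction checked above (registers
// are illustrative):
//   add x0, x0, #8
//   ldr x0, [x0]         ; x0 is both base and destination; a pre-index form
//                        ; would be UNPREDICTABLE, so we bail instead
// With a different destination, "ldr x1, [x0]", the add is merged into
// "ldr x1, [x0, #8]!".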
1478
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001479bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
1480 MachineBasicBlock::iterator &MBBI) {
1481 MachineInstr *MI = MBBI;
1482 // If this is a volatile load, don't mess with it.
1483 if (MI->hasOrderedMemoryRef())
1484 return false;
1485
1486 // Make sure this is a reg+imm.
1487 // FIXME: It is possible to extend it to handle reg+reg cases.
1488 if (!getLdStOffsetOp(MI).isImm())
1489 return false;
1490
Chad Rosier35706ad2016-02-04 21:26:02 +00001491 // Look backward up to LdStLimit instructions.
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001492 MachineBasicBlock::iterator StoreI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001493 if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001494 ++NumLoadsFromStoresPromoted;
1495 // Promote the load. Keeping the iterator straight is a
1496 // pain, so we let the merge routine tell us what the next instruction
1497 // is after it's done mucking about.
1498 MBBI = promoteLoadFromStore(MBBI, StoreI);
1499 return true;
1500 }
1501 return false;
1502}
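// A sketch of the same-width case promoted here (registers and offsets are
// illustrative); the wider-store case is illustrated in optimizeBlock below:
//   str w1, [x0, #4]
//   ldr w2, [x0, #4]
//   ; becomes
//   str w1, [x0, #4]
//   mov w2, w1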
1503
Chad Rosier24c46ad2016-02-09 18:10:20 +00001504bool AArch64LoadStoreOpt::isCandidateToMergeOrPair(MachineInstr *MI) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001505 // If this is a volatile load/store, don't mess with it.
1506 if (MI->hasOrderedMemoryRef())
1507 return false;
1508
1509 // Make sure this is a reg+imm (as opposed to an address reloc).
1510 if (!getLdStOffsetOp(MI).isImm())
1511 return false;
1512
1513 // Check if this load/store has a hint to avoid pair formation.
1514 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1515 if (TII->isLdStPairSuppressed(MI))
1516 return false;
1517
Chad Rosier24c46ad2016-02-09 18:10:20 +00001518 return true;
1519}
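// Illustrative examples of memory operations the candidate check above
// rejects: volatile or atomic accesses (hasOrderedMemoryRef), relocated
// offsets such as
//   ldr x0, [x1, :lo12:sym]
// and instructions marked by the AArch64StorePairSuppress pass.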
1520
1521// Find narrow loads that can be converted into a single wider load with
1522// bitfield extract instructions. Also merge adjacent zero stores into a wider
1523// store.
1524bool AArch64LoadStoreOpt::tryToMergeLdStInst(
1525 MachineBasicBlock::iterator &MBBI) {
1526 assert((isNarrowLoad(MBBI) || isNarrowStore(MBBI)) && "Expected narrow op.");
1527 MachineInstr *MI = MBBI;
1528 MachineBasicBlock::iterator E = MI->getParent()->end();
1529
1530 if (!isCandidateToMergeOrPair(MI))
1531 return false;
1532
1533 // Look ahead up to LdStLimit instructions for a mergeable instruction.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001534 LdStPairFlags Flags;
Chad Rosier35706ad2016-02-04 21:26:02 +00001535 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001536 if (Paired != E) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001537 if (isNarrowLoad(MI)) {
1538 ++NumNarrowLoadsPromoted;
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001539 } else if (isNarrowStore(MI)) {
1540 ++NumZeroStoresPromoted;
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001541 }
Chad Rosier24c46ad2016-02-09 18:10:20 +00001542 // Keeping the iterator straight is a pain, so we let the merge routine tell
1543 // us what the next instruction is after it's done mucking about.
1544 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1545 return true;
1546 }
1547 return false;
1548}
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001549
Chad Rosier24c46ad2016-02-09 18:10:20 +00001550// Find loads and stores that can be merged into a single load or store pair
1551// instruction.
1552bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
1553 MachineInstr *MI = MBBI;
1554 MachineBasicBlock::iterator E = MI->getParent()->end();
1555
1556 if (!isCandidateToMergeOrPair(MI))
1557 return false;
1558
1559 // Look ahead up to LdStLimit instructions for a pairable instruction.
1560 LdStPairFlags Flags;
1561 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
1562 if (Paired != E) {
1563 ++NumPairCreated;
1564 if (isUnscaledLdSt(MI))
1565 ++NumUnscaledPairCreated;
1566 // Keeping the iterator straight is a pain, so we let the merge routine tell
1567 // us what the next instruction is after it's done mucking about.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001568 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1569 return true;
1570 }
1571 return false;
1572}
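// A sketch of pairing unscaled loads, which increments both counters above
// (registers and offsets are illustrative):
//   ldur x0, [x2, #-8]
//   ldur x1, [x2]
//   ; becomes
//   ldp x0, x1, [x2, #-8]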
1573
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001574bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
1575 bool enableNarrowLdOpt) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001576 bool Modified = false;
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001577 // Four transformations to do here:
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001578 // 1) Find loads that directly read from stores and promote them by
1579 // replacing with mov instructions. If the store is wider than the load,
1580 // the load will be replaced with a bitfield extract.
1581 // e.g.,
1582 // str w1, [x0, #4]
1583 // ldrh w2, [x0, #6]
1584 // ; becomes
1585 // str w1, [x0, #4]
1586 // lsr w2, w1, #16
Tim Northover3b0846e2014-05-24 12:50:23 +00001587 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001588 MBBI != E;) {
1589 MachineInstr *MI = MBBI;
1590 switch (MI->getOpcode()) {
1591 default:
1592 // Just move on to the next instruction.
1593 ++MBBI;
1594 break;
1595 // Scaled instructions.
1596 case AArch64::LDRBBui:
1597 case AArch64::LDRHHui:
1598 case AArch64::LDRWui:
1599 case AArch64::LDRXui:
1600 // Unscaled instructions.
1601 case AArch64::LDURBBi:
1602 case AArch64::LDURHHi:
1603 case AArch64::LDURWi:
1604 case AArch64::LDURXi: {
1605 if (tryToPromoteLoadFromStore(MBBI)) {
1606 Modified = true;
1607 break;
1608 }
1609 ++MBBI;
1610 break;
1611 }
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001612 }
1613 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001614 // 2) Find narrow loads that can be converted into a single wider load
1615 // with bitfield extract instructions.
1616 // e.g.,
1617 // ldrh w0, [x2]
1618 // ldrh w1, [x2, #2]
1619 // ; becomes
1620 // ldr w0, [x2]
1621 // ubfx w1, w0, #16, #16
1622 // and w0, w0, #ffff
Jun Bum Lim1de2d442016-02-05 20:02:03 +00001623 //
1624 // Also merge adjacent zero stores into a wider store.
1625 // e.g.,
1626 // strh wzr, [x0]
1627 // strh wzr, [x0, #2]
1628 // ; becomes
1629 // str wzr, [x0]
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001630 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001631 enableNarrowLdOpt && MBBI != E;) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001632 MachineInstr *MI = MBBI;
1633 switch (MI->getOpcode()) {
1634 default:
1635 // Just move on to the next instruction.
1636 ++MBBI;
1637 break;
1638 // Scaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001639 case AArch64::LDRBBui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001640 case AArch64::LDRHHui:
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001641 case AArch64::LDRSBWui:
1642 case AArch64::LDRSHWui:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001643 case AArch64::STRBBui:
1644 case AArch64::STRHHui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001645 // Unscaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001646 case AArch64::LDURBBi:
1647 case AArch64::LDURHHi:
1648 case AArch64::LDURSBWi:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001649 case AArch64::LDURSHWi:
1650 case AArch64::STURBBi:
1651 case AArch64::STURHHi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001652 if (tryToMergeLdStInst(MBBI)) {
1653 Modified = true;
1654 break;
1655 }
1656 ++MBBI;
1657 break;
1658 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001659 }
1660 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001661 // 3) Find loads and stores that can be merged into a single load or store
1662 // pair instruction.
1663 // e.g.,
1664 // ldr x0, [x2]
1665 // ldr x1, [x2, #8]
1666 // ; becomes
1667 // ldp x0, x1, [x2]
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001668 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Tim Northover3b0846e2014-05-24 12:50:23 +00001669 MBBI != E;) {
1670 MachineInstr *MI = MBBI;
1671 switch (MI->getOpcode()) {
1672 default:
1673 // Just move on to the next instruction.
1674 ++MBBI;
1675 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001676 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001677 case AArch64::STRSui:
1678 case AArch64::STRDui:
1679 case AArch64::STRQui:
1680 case AArch64::STRXui:
1681 case AArch64::STRWui:
1682 case AArch64::LDRSui:
1683 case AArch64::LDRDui:
1684 case AArch64::LDRQui:
1685 case AArch64::LDRXui:
1686 case AArch64::LDRWui:
Quentin Colombet29f55332015-01-24 01:25:54 +00001687 case AArch64::LDRSWui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001688 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001689 case AArch64::STURSi:
1690 case AArch64::STURDi:
1691 case AArch64::STURQi:
1692 case AArch64::STURWi:
1693 case AArch64::STURXi:
1694 case AArch64::LDURSi:
1695 case AArch64::LDURDi:
1696 case AArch64::LDURQi:
1697 case AArch64::LDURWi:
Quentin Colombet29f55332015-01-24 01:25:54 +00001698 case AArch64::LDURXi:
1699 case AArch64::LDURSWi: {
Chad Rosier24c46ad2016-02-09 18:10:20 +00001700 if (tryToPairLdStInst(MBBI)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001701 Modified = true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001702 break;
1703 }
1704 ++MBBI;
1705 break;
1706 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001707 }
1708 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001709 // 4) Find base register updates that can be merged into the load or store
1710 // as a base-reg writeback.
1711 // e.g.,
1712 // ldr x0, [x2]
1713 // add x2, x2, #4
1714 // ; becomes
1715 // ldr x0, [x2], #4
Tim Northover3b0846e2014-05-24 12:50:23 +00001716 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1717 MBBI != E;) {
1718 MachineInstr *MI = MBBI;
1719 // Do update merging. It's simpler to keep this separate from the above
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001720 // switches, though not strictly necessary.
Matthias Braunfa3872e2015-05-18 20:27:55 +00001721 unsigned Opc = MI->getOpcode();
Tim Northover3b0846e2014-05-24 12:50:23 +00001722 switch (Opc) {
1723 default:
1724 // Just move on to the next instruction.
1725 ++MBBI;
1726 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001727 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001728 case AArch64::STRSui:
1729 case AArch64::STRDui:
1730 case AArch64::STRQui:
1731 case AArch64::STRXui:
1732 case AArch64::STRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001733 case AArch64::STRHHui:
1734 case AArch64::STRBBui:
Tim Northover3b0846e2014-05-24 12:50:23 +00001735 case AArch64::LDRSui:
1736 case AArch64::LDRDui:
1737 case AArch64::LDRQui:
1738 case AArch64::LDRXui:
1739 case AArch64::LDRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001740 case AArch64::LDRHHui:
1741 case AArch64::LDRBBui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001742 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001743 case AArch64::STURSi:
1744 case AArch64::STURDi:
1745 case AArch64::STURQi:
1746 case AArch64::STURWi:
1747 case AArch64::STURXi:
1748 case AArch64::LDURSi:
1749 case AArch64::LDURDi:
1750 case AArch64::LDURQi:
1751 case AArch64::LDURWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001752 case AArch64::LDURXi:
1753 // Paired instructions.
1754 case AArch64::LDPSi:
Chad Rosier43150122015-09-29 20:39:55 +00001755 case AArch64::LDPSWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001756 case AArch64::LDPDi:
1757 case AArch64::LDPQi:
1758 case AArch64::LDPWi:
1759 case AArch64::LDPXi:
1760 case AArch64::STPSi:
1761 case AArch64::STPDi:
1762 case AArch64::STPQi:
1763 case AArch64::STPWi:
1764 case AArch64::STPXi: {
Tim Northover3b0846e2014-05-24 12:50:23 +00001765 // Make sure this is a reg+imm (as opposed to an address reloc).
Chad Rosierf77e9092015-08-06 15:50:12 +00001766 if (!getLdStOffsetOp(MI).isImm()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001767 ++MBBI;
1768 break;
1769 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001770 // Look forward to try to form a post-index instruction. For example,
1771 // ldr x0, [x20]
1772 // add x20, x20, #32
1773 // merged into:
1774 // ldr x0, [x20], #32
Tim Northover3b0846e2014-05-24 12:50:23 +00001775 MachineBasicBlock::iterator Update =
Chad Rosier35706ad2016-02-04 21:26:02 +00001776 findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001777 if (Update != E) {
1778 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001779 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001780 Modified = true;
1781 ++NumPostFolded;
1782 break;
1783 }
1784 // Don't know how to handle pre/post-index versions, so move to the next
1785 // instruction.
Chad Rosier22eb7102015-08-06 17:37:18 +00001786 if (isUnscaledLdSt(Opc)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001787 ++MBBI;
1788 break;
1789 }
1790
1791 // Look back to try to find a pre-index instruction. For example,
1792 // add x0, x0, #8
1793 // ldr x1, [x0]
1794 // merged into:
1795 // ldr x1, [x0, #8]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001796 Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001797 if (Update != E) {
1798 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001799 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001800 Modified = true;
1801 ++NumPreFolded;
1802 break;
1803 }
Chad Rosier7a83d772015-10-01 13:09:44 +00001804 // The immediate in the load/store is scaled by the size of the memory
1805 // operation. The immediate in the add we're looking for,
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001806 // however, is not, so adjust here.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001807 int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001808
Tim Northover3b0846e2014-05-24 12:50:23 +00001809 // Look forward to try to find an update instruction we can merge in as a pre-index writeback. For example,
1810 // ldr x1, [x0, #64]
1811 // add x0, x0, #64
1812 // merged into:
1813 // ldr x1, [x0, #64]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001814 Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001815 if (Update != E) {
1816 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001817 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001818 Modified = true;
1819 ++NumPreFolded;
1820 break;
1821 }
1822
1823 // Nothing found. Just move to the next instruction.
1824 ++MBBI;
1825 break;
1826 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001827 }
1828 }
1829
1830 return Modified;
1831}
1832
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001833bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001834 bool ProfitableArch = Subtarget->isCortexA57();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001835 // FIXME: The benefit from converting narrow loads into a wider load could be
1836 // microarchitectural as it assumes that a single load with two bitfield
1837 // extracts is cheaper than two narrow loads. Currently, this conversion is
1838 // enabled only on Cortex-A57, where the performance benefit was verified.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001839 return ProfitableArch && !Subtarget->requiresStrictAlign();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001840}
1841
Tim Northover3b0846e2014-05-24 12:50:23 +00001842bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
Oliver Stannardd414c992015-11-10 11:04:18 +00001843 Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
1844 TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
1845 TRI = Subtarget->getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +00001846
Chad Rosierbba881e2016-02-02 15:02:30 +00001847 // Resize the modified and used register bitfield trackers. We do this once
1848 // per function and then clear the bitfield each time we optimize a load or
1849 // store.
1850 ModifiedRegs.resize(TRI->getNumRegs());
1851 UsedRegs.resize(TRI->getNumRegs());
1852
Tim Northover3b0846e2014-05-24 12:50:23 +00001853 bool Modified = false;
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001854 bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
Tim Northover3b0846e2014-05-24 12:50:23 +00001855 for (auto &MBB : Fn)
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001856 Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001857
1858 return Modified;
1859}
1860
1861// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
1862// loads and stores near one another?
1863
Chad Rosier43f5c842015-08-05 12:40:13 +00001864/// createAArch64LoadStoreOptimizationPass - returns an instance of the
1865/// load / store optimization pass.
Tim Northover3b0846e2014-05-24 12:50:23 +00001866FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
1867 return new AArch64LoadStoreOpt();
1868}