//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
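//
// Illustrative examples of the rewrites this pass performs (made up for
// exposition; the registers and offsets are not taken from any test):
//
//   Pairing:      ldr w0, [x2]
//                 ldr w1, [x2, #4]     =>   ldp w0, w1, [x2]
//
//   Pre-index:    add x1, x1, #16
//                 str x0, [x1]         =>   str x0, [x1, #16]!
//
//   Post-index:   ldr x0, [x1]
//                 add x1, x1, #16      =>   ldr x0, [x1], #16
//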

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

/// AArch64LoadStoreOpt - Post-register allocation pass to combine
/// load / store instructions to form ldp / stp instructions.

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pair instructions generated from unscaled");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;
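  // For instance (illustrative only): pairing an ldrsw (the current
  // instruction I) with a plain ldr found later gives SExtIdx == 0, i.e. the
  // first result of the merged ldp must be sign-extended.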

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Check if converting two narrow loads into a single wider load with
  // bitfield extracts could be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowStore(MachineInstr *MI) {
  return isNarrowStore(MI->getOpcode());
}

static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

static unsigned isMatchingStore(MachineInstr *LoadInst,
                                MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

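// Illustrative example (made up for exposition): "str x0, [x1, #8]" writes
// bytes [8, 16) relative to x1, so a following "ldr w2, [x1, #12]" reads
// bytes [12, 16) and is fully covered by the store; a load at offset #16
// would not be.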
static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  unsigned NewOpc = getMatchingPairOpcode(Opc);
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(Paired).getImm() + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();

  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(NewOpc))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm)
                   .setMemRefs(I->mergeMemRefsWith(*Paired));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  if (isNarrowStore(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(NewOpc))
              .addOperand(getLdStRegOp(I))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm)
              .setMemRefs(I->mergeMemRefsWith(*Paired));
  } else {
    // Handle Unscaled
    if (IsUnscaled)
      OffsetImm /= OffsetStride;
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(NewOpc))
              .addOperand(getLdStRegOp(RtMI))
              .addOperand(getLdStRegOp(Rt2MI))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm);
  }

  (void)MIB;

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

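// Illustrative example of the promotion performed below (made up; assumes a
// little-endian target): when a load reads bytes that a preceding store to
// the same base register wrote, the load can be replaced with a copy or a
// bitfield extract of the stored value, e.g.
//   str  w0, [x2]                str w0, [x2]
//   ldrh w1, [x2]          =>    and w1, w0, #0xffff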
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(LoadI);
  int StoreSize = getMemScale(StoreI);
  unsigned LdRt = getLdStRegOp(LoadI).getReg();
  unsigned StRt = getLdStRegOp(StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if its destination register is the same as the register
    // holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store have the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = isUnscaledLdSt(LoadI);
    assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0);              // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled)
    Offset /= OffsetStride;

  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
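// (For reference: alignTo(5, 4) == 8 and alignTo(8, 4) == 8.)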

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator E = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  for (unsigned Count = 0; MBBI != E && Count < Limit;) {
    --MBBI;
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;
    // Now that we know this is a real instruction, count it.
    ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI->mayStore() && isMatchingStore(FirstMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(FirstMI, MI) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI->isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI->mayStore() && mayAlias(FirstMI, MI, TII))
      return false;
  }
  return false;
}

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  bool IsNarrowStore = isNarrowStore(Opc);

  // For narrow stores, find only the case where the stored value is WZR.
  if (IsNarrowStore && Reg != AArch64::WZR)
    return E;

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    bool CanMergeOpc = Opc == MI->getOpcode();
    Flags.setSExtIdx(-1);
    if (!CanMergeOpc) {
      bool IsValidLdStrOpc;
      unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
      assert(IsValidLdStrOpc &&
             "Given Opc should be a Load or Store with an immediate");
      // Opc will be the first instruction in the pair.
      Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
      CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
    }

    if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop
        // looking as something is going on that we don't have enough
        // information to safely transform. Similarly, stop if we see a hint
        // to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;
        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        bool MIIsUnscaled = isUnscaledLdSt(MI);
        bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
        if (!IsNarrowLoad &&
            !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        if (IsNarrowLoad || IsNarrowStore) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow
          // input, bail and keep looking.
          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        } else {
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        // For narrow stores, allow only when the stored value is the same
        // (i.e., WZR).
        if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
            (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between
        // the first and the second alias with the first, we can combine the
        // first into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // If the instruction wasn't a matching load or store, stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI->isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI->mayLoadOrStore())
      MemInsns.push_back(MI);
  }
  return E;
}

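// Illustrative example of the update folding performed below (registers and
// immediates are made up):
//   pre-index:  "add x1, x1, #16; ldr x0, [x1]"   =>  "ldr x0, [x1, #16]!"
//   post-index: "ldr x0, [x1]; add x1, x1, #16"   =>  "ldr x0, [x1], #16"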
1239MachineBasicBlock::iterator
Chad Rosier2dfd3542015-09-23 13:51:44 +00001240AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1241 MachineBasicBlock::iterator Update,
1242 bool IsPreIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001243 assert((Update->getOpcode() == AArch64::ADDXri ||
1244 Update->getOpcode() == AArch64::SUBXri) &&
1245 "Unexpected base register update instruction to merge!");
1246 MachineBasicBlock::iterator NextI = I;
1247 // Return the instruction following the merged instruction, which is
1248 // the instruction following our unmerged load. Unless that's the add/sub
1249 // instruction we're merging, in which case it's the one after that.
1250 if (++NextI == Update)
1251 ++NextI;
1252
1253 int Value = Update->getOperand(2).getImm();
1254 assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
Chad Rosier2dfd3542015-09-23 13:51:44 +00001255 "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
Tim Northover3b0846e2014-05-24 12:50:23 +00001256 if (Update->getOpcode() == AArch64::SUBXri)
1257 Value = -Value;
1258
Chad Rosier2dfd3542015-09-23 13:51:44 +00001259 unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1260 : getPostIndexedOpcode(I->getOpcode());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001261 MachineInstrBuilder MIB;
1262 if (!isPairedLdSt(I)) {
1263 // Non-paired instruction.
1264 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1265 .addOperand(getLdStRegOp(Update))
1266 .addOperand(getLdStRegOp(I))
1267 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001268 .addImm(Value)
1269 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001270 } else {
1271 // Paired instruction.
Chad Rosier32d4d372015-09-29 16:07:32 +00001272 int Scale = getMemScale(I);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001273 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1274 .addOperand(getLdStRegOp(Update))
1275 .addOperand(getLdStRegOp(I, 0))
1276 .addOperand(getLdStRegOp(I, 1))
1277 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001278 .addImm(Value / Scale)
1279 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001280 }
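  // Note that the paired form re-scales the update amount before encoding it:
  // e.g., folding "add x2, x2, #32" into "ldp x0, x1, [x2]" (Scale == 8)
  // stores an immediate of 4, giving the post-indexed "ldp x0, x1, [x2], #32".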
Tim Northover3b0846e2014-05-24 12:50:23 +00001281 (void)MIB;
1282
Chad Rosier2dfd3542015-09-23 13:51:44 +00001283 if (IsPreIdx)
1284 DEBUG(dbgs() << "Creating pre-indexed load/store.");
1285 else
1286 DEBUG(dbgs() << "Creating post-indexed load/store.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001287 DEBUG(dbgs() << " Replacing instructions:\n ");
1288 DEBUG(I->print(dbgs()));
1289 DEBUG(dbgs() << " ");
1290 DEBUG(Update->print(dbgs()));
1291 DEBUG(dbgs() << " with instruction:\n ");
1292 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1293 DEBUG(dbgs() << "\n");
1294
1295 // Erase the old instructions for the block.
1296 I->eraseFromParent();
1297 Update->eraseFromParent();
1298
1299 return NextI;
1300}
1301
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001302bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
1303 MachineInstr *MI,
1304 unsigned BaseReg, int Offset) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001305 switch (MI->getOpcode()) {
1306 default:
1307 break;
1308 case AArch64::SUBXri:
1309 // Negate the offset for a SUB instruction.
1310 Offset *= -1;
1311 // FALLTHROUGH
1312 case AArch64::ADDXri:
1313 // Make sure it's a vanilla immediate operand, not a relocation or
1314 // anything else we can't handle.
1315 if (!MI->getOperand(2).isImm())
1316 break;
1317 // Watch out for 1 << 12 shifted value.
1318 if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
1319 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001320
1321 // The update instruction source and destination register must be the
1322 // same as the load/store base register.
1323 if (MI->getOperand(0).getReg() != BaseReg ||
1324 MI->getOperand(1).getReg() != BaseReg)
1325 break;
1326
1327 bool IsPairedInsn = isPairedLdSt(MemMI);
1328 int UpdateOffset = MI->getOperand(2).getImm();
1329 // For non-paired load/store instructions, the immediate must fit in a
1330 // signed 9-bit integer.
1331 if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
1332 break;
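    // For example, "add x0, x0, #256" can't be folded into a non-paired
    // pre/post-indexed form: 256 is outside the signed 9-bit range
    // [-256, 255] checked above.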
1333
1334 // For paired load/store instructions, the immediate must be a multiple of
1335 // the scaling factor. The scaled offset must also fit into a signed 7-bit
1336 // integer.
1337 if (IsPairedInsn) {
Chad Rosier32d4d372015-09-29 16:07:32 +00001338 int Scale = getMemScale(MemMI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001339 if (UpdateOffset % Scale != 0)
1340 break;
1341
1342 int ScaledOffset = UpdateOffset / Scale;
1343 if (ScaledOffset > 64 || ScaledOffset < -64)
1344 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001345 }
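    // For example, with "ldp x0, x1, [x3]" (Scale == 8), "add x3, x3, #48"
    // yields ScaledOffset == 6 and is acceptable, while "add x3, x3, #20" is
    // rejected by the checks above because 20 isn't a multiple of the scale.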
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001346
1347 // If we have a non-zero Offset, check that it matches the amount we're
1348 // adding to the register; a zero Offset matches any update amount.
1349 if (!Offset || Offset == MI->getOperand(2).getImm())
1350 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001351 break;
1352 }
1353 return false;
1354}
1355
1356MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
Chad Rosier234bf6f2016-01-18 21:56:40 +00001357 MachineBasicBlock::iterator I, int UnscaledOffset) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001358 MachineBasicBlock::iterator E = I->getParent()->end();
1359 MachineInstr *MemMI = I;
1360 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001361
Chad Rosierf77e9092015-08-06 15:50:12 +00001362 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001363 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001364
Chad Rosierb7c5b912015-10-01 13:43:05 +00001365 // Scan forward looking for post-index opportunities. The update can't be
1366 // folded if the memory instruction doesn't already have the offset we're
1367 // looking for.
1368 if (MIUnscaledOffset != UnscaledOffset)
1369 return E;
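  // For example, when called with UnscaledOffset == 0 to look for a post-index
  // update, a memory instruction such as "ldr x0, [x20, #8]" is rejected here
  // because its own offset is non-zero.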
1370
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001371 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001372 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001373 bool IsPairedInsn = isPairedLdSt(MemMI);
1374 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1375 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1376 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1377 return E;
1378 }
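  // For example, "ldr x20, [x20]" can't take a writeback form such as
  // "ldr x20, [x20], #8": the loaded result and the written-back base would be
  // the same register.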
Tim Northover3b0846e2014-05-24 12:50:23 +00001379
Tim Northover3b0846e2014-05-24 12:50:23 +00001380 // Track which registers have been modified and used between the first insn
1381 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001382 ModifiedRegs.reset();
1383 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001384 ++MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001385 for (; MBBI != E; ++MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001386 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001387 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001388 if (MI->isDebugValue())
1389 continue;
1390
Tim Northover3b0846e2014-05-24 12:50:23 +00001391 // If we found a match, return it.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001392 if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001393 return MBBI;
1394
1395 // Update the status of what the instruction clobbered and used.
1396 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1397
1398 // Otherwise, if the base register is used or modified, we have no match, so
1399 // return early.
1400 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1401 return E;
1402 }
1403 return E;
1404}
1405
1406MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
Chad Rosier234bf6f2016-01-18 21:56:40 +00001407 MachineBasicBlock::iterator I) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001408 MachineBasicBlock::iterator B = I->getParent()->begin();
1409 MachineBasicBlock::iterator E = I->getParent()->end();
1410 MachineInstr *MemMI = I;
1411 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001412
Chad Rosierf77e9092015-08-06 15:50:12 +00001413 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1414 int Offset = getLdStOffsetOp(MemMI).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001415
1416 // If the load/store is the first instruction in the block, there's obviously
1417 // not any matching update. Ditto if the memory offset isn't zero.
1418 if (MBBI == B || Offset != 0)
1419 return E;
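  // For example, "ldr x1, [x0, #16]" is not a candidate here: only a
  // zero-offset load/store can absorb a preceding add/sub of the base as a
  // pre-index writeback.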
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001420 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001421 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001422 bool IsPairedInsn = isPairedLdSt(MemMI);
1423 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1424 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1425 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1426 return E;
1427 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001428
1429 // Track which registers have been modified and used between the first insn
1430 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001431 ModifiedRegs.reset();
1432 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001433 --MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001434 for (; MBBI != B; --MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001435 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001436 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001437 if (MI->isDebugValue())
1438 continue;
1439
Tim Northover3b0846e2014-05-24 12:50:23 +00001440 // If we found a match, return it.
Chad Rosier11c825f2015-09-30 19:44:40 +00001441 if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001442 return MBBI;
1443
1444 // Update the status of what the instruction clobbered and used.
1445 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1446
1447 // Otherwise, if the base register is used or modified, we have no match, so
1448 // return early.
1449 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1450 return E;
1451 }
1452 return E;
1453}
1454
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001455bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
1456 MachineBasicBlock::iterator &MBBI) {
1457 MachineInstr *MI = MBBI;
1458 // If this is a volatile load, don't mess with it.
1459 if (MI->hasOrderedMemoryRef())
1460 return false;
1461
1462 // Make sure this is a reg+imm.
1463 // FIXME: It is possible to extend it to handle reg+reg cases.
1464 if (!getLdStOffsetOp(MI).isImm())
1465 return false;
1466
1467 // Look backward up to ScanLimit instructions.
1468 MachineBasicBlock::iterator StoreI;
1469 if (findMatchingStore(MBBI, ScanLimit, StoreI)) {
1470 ++NumLoadsFromStoresPromoted;
1471 // Promote the load. Keeping the iterator straight is a
1472 // pain, so we let the merge routine tell us what the next instruction
1473 // is after it's done mucking about.
1474 MBBI = promoteLoadFromStore(MBBI, StoreI);
1475 return true;
1476 }
1477 return false;
1478}
1479
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001480bool AArch64LoadStoreOpt::tryToMergeLdStInst(
1481 MachineBasicBlock::iterator &MBBI) {
1482 MachineInstr *MI = MBBI;
1483 MachineBasicBlock::iterator E = MI->getParent()->end();
1484 // If this is a volatile load/store, don't mess with it.
1485 if (MI->hasOrderedMemoryRef())
1486 return false;
1487
1488 // Make sure this is a reg+imm (as opposed to an address reloc).
1489 if (!getLdStOffsetOp(MI).isImm())
1490 return false;
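  // For example, "ldr x0, [x8, :lo12:sym]" carries a symbol reference
  // (relocation) rather than a plain immediate offset, so it isn't considered
  // for pairing.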
1491
1492 // Check if this load/store has a hint to avoid pair formation.
1493 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1494 if (TII->isLdStPairSuppressed(MI))
1495 return false;
1496
1497 // Look ahead up to ScanLimit instructions for a pairable instruction.
1498 LdStPairFlags Flags;
1499 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
1500 if (Paired != E) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001501 if (isNarrowLoad(MI)) {
1502 ++NumNarrowLoadsPromoted;
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001503 } else if (isNarrowStore(MI)) {
1504 ++NumZeroStoresPromoted;
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001505 } else {
1506 ++NumPairCreated;
1507 if (isUnscaledLdSt(MI))
1508 ++NumUnscaledPairCreated;
1509 }
1510
1511 // Merge the instructions. Keeping the iterator straight is a
1512 // pain, so we let the merge routine tell us what the next instruction
1513 // is after it's done mucking about.
1514 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1515 return true;
1516 }
1517 return false;
1518}
1519
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001520bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
1521 bool enableNarrowLdOpt) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001522 bool Modified = false;
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001523 // Four transformations to do here:
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001524 // 1) Find loads that directly read from stores and promote them by
1525 // replacing with mov instructions. If the store is wider than the load,
1526 // the load will be replaced with a bitfield extract.
1527 // e.g.,
1528 // str w1, [x0, #4]
1529 // ldrh w2, [x0, #6]
1530 // ; becomes
1531 // str w1, [x0, #4]
1532 // lsr w2, w1, #16
Tim Northover3b0846e2014-05-24 12:50:23 +00001533 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001534 MBBI != E;) {
1535 MachineInstr *MI = MBBI;
1536 switch (MI->getOpcode()) {
1537 default:
1538 // Just move on to the next instruction.
1539 ++MBBI;
1540 break;
1541 // Scaled instructions.
1542 case AArch64::LDRBBui:
1543 case AArch64::LDRHHui:
1544 case AArch64::LDRWui:
1545 case AArch64::LDRXui:
1546 // Unscaled instructions.
1547 case AArch64::LDURBBi:
1548 case AArch64::LDURHHi:
1549 case AArch64::LDURWi:
1550 case AArch64::LDURXi: {
1551 if (tryToPromoteLoadFromStore(MBBI)) {
1552 Modified = true;
1553 break;
1554 }
1555 ++MBBI;
1556 break;
1557 }
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001558 }
1559 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001560 // 2) Find narrow loads that can be converted into a single wider load
1561 // with bitfield extract instructions.
1562 // e.g.,
1563 // ldrh w0, [x2]
1564 // ldrh w1, [x2, #2]
1565 // ; becomes
1566 // ldr w0, [x2]
1567 // ubfx w1, w0, #16, #16
1568 // and w0, w0, #ffff
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001569 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001570 enableNarrowLdOpt && MBBI != E;) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001571 MachineInstr *MI = MBBI;
1572 switch (MI->getOpcode()) {
1573 default:
1574 // Just move on to the next instruction.
1575 ++MBBI;
1576 break;
1577 // Scaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001578 case AArch64::LDRBBui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001579 case AArch64::LDRHHui:
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001580 case AArch64::LDRSBWui:
1581 case AArch64::LDRSHWui:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001582 case AArch64::STRBBui:
1583 case AArch64::STRHHui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001584 // Unscaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001585 case AArch64::LDURBBi:
1586 case AArch64::LDURHHi:
1587 case AArch64::LDURSBWi:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001588 case AArch64::LDURSHWi:
1589 case AArch64::STURBBi:
1590 case AArch64::STURHHi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001591 if (tryToMergeLdStInst(MBBI)) {
1592 Modified = true;
1593 break;
1594 }
1595 ++MBBI;
1596 break;
1597 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001598 }
1599 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001600 // 3) Find loads and stores that can be merged into a single load or store
1601 // pair instruction.
1602 // e.g.,
1603 // ldr x0, [x2]
1604 // ldr x1, [x2, #8]
1605 // ; becomes
1606 // ldp x0, x1, [x2]
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001607 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Tim Northover3b0846e2014-05-24 12:50:23 +00001608 MBBI != E;) {
1609 MachineInstr *MI = MBBI;
1610 switch (MI->getOpcode()) {
1611 default:
1612 // Just move on to the next instruction.
1613 ++MBBI;
1614 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001615 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001616 case AArch64::STRSui:
1617 case AArch64::STRDui:
1618 case AArch64::STRQui:
1619 case AArch64::STRXui:
1620 case AArch64::STRWui:
1621 case AArch64::LDRSui:
1622 case AArch64::LDRDui:
1623 case AArch64::LDRQui:
1624 case AArch64::LDRXui:
1625 case AArch64::LDRWui:
Quentin Colombet29f55332015-01-24 01:25:54 +00001626 case AArch64::LDRSWui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001627 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001628 case AArch64::STURSi:
1629 case AArch64::STURDi:
1630 case AArch64::STURQi:
1631 case AArch64::STURWi:
1632 case AArch64::STURXi:
1633 case AArch64::LDURSi:
1634 case AArch64::LDURDi:
1635 case AArch64::LDURQi:
1636 case AArch64::LDURWi:
Quentin Colombet29f55332015-01-24 01:25:54 +00001637 case AArch64::LDURXi:
1638 case AArch64::LDURSWi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001639 if (tryToMergeLdStInst(MBBI)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001640 Modified = true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001641 break;
1642 }
1643 ++MBBI;
1644 break;
1645 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001646 }
1647 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001648 // 4) Find base register updates that can be merged into the load or store
1649 // as a base-reg writeback.
1650 // e.g.,
1651 // ldr x0, [x2]
1652 // add x2, x2, #4
1653 // ; becomes
1654 // ldr x0, [x2], #4
Tim Northover3b0846e2014-05-24 12:50:23 +00001655 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1656 MBBI != E;) {
1657 MachineInstr *MI = MBBI;
1658 // Do update merging. It's simpler to keep this separate from the above
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001659 // switches, though not strictly necessary.
Matthias Braunfa3872e2015-05-18 20:27:55 +00001660 unsigned Opc = MI->getOpcode();
Tim Northover3b0846e2014-05-24 12:50:23 +00001661 switch (Opc) {
1662 default:
1663 // Just move on to the next instruction.
1664 ++MBBI;
1665 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001666 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001667 case AArch64::STRSui:
1668 case AArch64::STRDui:
1669 case AArch64::STRQui:
1670 case AArch64::STRXui:
1671 case AArch64::STRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001672 case AArch64::STRHHui:
1673 case AArch64::STRBBui:
Tim Northover3b0846e2014-05-24 12:50:23 +00001674 case AArch64::LDRSui:
1675 case AArch64::LDRDui:
1676 case AArch64::LDRQui:
1677 case AArch64::LDRXui:
1678 case AArch64::LDRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001679 case AArch64::LDRHHui:
1680 case AArch64::LDRBBui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001681 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001682 case AArch64::STURSi:
1683 case AArch64::STURDi:
1684 case AArch64::STURQi:
1685 case AArch64::STURWi:
1686 case AArch64::STURXi:
1687 case AArch64::LDURSi:
1688 case AArch64::LDURDi:
1689 case AArch64::LDURQi:
1690 case AArch64::LDURWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001691 case AArch64::LDURXi:
1692 // Paired instructions.
1693 case AArch64::LDPSi:
Chad Rosier43150122015-09-29 20:39:55 +00001694 case AArch64::LDPSWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001695 case AArch64::LDPDi:
1696 case AArch64::LDPQi:
1697 case AArch64::LDPWi:
1698 case AArch64::LDPXi:
1699 case AArch64::STPSi:
1700 case AArch64::STPDi:
1701 case AArch64::STPQi:
1702 case AArch64::STPWi:
1703 case AArch64::STPXi: {
Tim Northover3b0846e2014-05-24 12:50:23 +00001704 // Make sure this is a reg+imm (as opposed to an address reloc).
Chad Rosierf77e9092015-08-06 15:50:12 +00001705 if (!getLdStOffsetOp(MI).isImm()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001706 ++MBBI;
1707 break;
1708 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001709 // Look forward to try to form a post-index instruction. For example,
1710 // ldr x0, [x20]
1711 // add x20, x20, #32
1712 // merged into:
1713 // ldr x0, [x20], #32
Tim Northover3b0846e2014-05-24 12:50:23 +00001714 MachineBasicBlock::iterator Update =
Chad Rosier234bf6f2016-01-18 21:56:40 +00001715 findMatchingUpdateInsnForward(MBBI, 0);
Tim Northover3b0846e2014-05-24 12:50:23 +00001716 if (Update != E) {
1717 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001718 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001719 Modified = true;
1720 ++NumPostFolded;
1721 break;
1722 }
1723 // Don't know how to handle pre/post-index versions, so move to the next
1724 // instruction.
Chad Rosier22eb7102015-08-06 17:37:18 +00001725 if (isUnscaledLdSt(Opc)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001726 ++MBBI;
1727 break;
1728 }
1729
1730 // Look back to try to find a pre-index instruction. For example,
1731 // add x0, x0, #8
1732 // ldr x1, [x0]
1733 // merged into:
1734 // ldr x1, [x0, #8]!
Chad Rosier234bf6f2016-01-18 21:56:40 +00001735 Update = findMatchingUpdateInsnBackward(MBBI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001736 if (Update != E) {
1737 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001738 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001739 Modified = true;
1740 ++NumPreFolded;
1741 break;
1742 }
Chad Rosier7a83d772015-10-01 13:09:44 +00001743 // The immediate in the load/store is scaled by the size of the memory
1744 // operation. The immediate in the add we're looking for,
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001745 // however, is not, so adjust here.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001746 int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
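      // For example, "ldr x1, [x0, #64]" stores an immediate operand of 8
      // (64 bytes over an 8-byte access), so UnscaledOffset is 8 * 8 == 64.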
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001747
Tim Northover3b0846e2014-05-24 12:50:23 +00001748 // Look forward to try to find a pre-index instruction. For example,
1749 // ldr x1, [x0, #64]
1750 // add x0, x0, #64
1751 // merged into:
1752 // ldr x1, [x0, #64]!
Chad Rosier234bf6f2016-01-18 21:56:40 +00001753 Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset);
Tim Northover3b0846e2014-05-24 12:50:23 +00001754 if (Update != E) {
1755 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001756 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001757 Modified = true;
1758 ++NumPreFolded;
1759 break;
1760 }
1761
1762 // Nothing found. Just move to the next instruction.
1763 ++MBBI;
1764 break;
1765 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001766 }
1767 }
1768
1769 return Modified;
1770}
1771
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001772bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001773 bool ProfitableArch = Subtarget->isCortexA57();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001774 // FIXME: The benefit from converting narrow loads into a wider load could be
1775 // microarchitectural as it assumes that a single load with two bitfield
1776 // extracts is cheaper than two narrow loads. Currently, this conversion is
1777 // enabled only on Cortex-A57, where the performance benefits were verified.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001778 return ProfitableArch && !Subtarget->requiresStrictAlign();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001779}
1780
Tim Northover3b0846e2014-05-24 12:50:23 +00001781bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
Oliver Stannardd414c992015-11-10 11:04:18 +00001782 Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
1783 TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
1784 TRI = Subtarget->getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +00001785
Chad Rosierbba881e2016-02-02 15:02:30 +00001786 // Resize the modified and used register bitfield trackers. We do this once
1787 // per function and then clear the bitfield each time we optimize a load or
1788 // store.
1789 ModifiedRegs.resize(TRI->getNumRegs());
1790 UsedRegs.resize(TRI->getNumRegs());
1791
Tim Northover3b0846e2014-05-24 12:50:23 +00001792 bool Modified = false;
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001793 bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
Tim Northover3b0846e2014-05-24 12:50:23 +00001794 for (auto &MBB : Fn)
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001795 Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001796
1797 return Modified;
1798}
1799
1800// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
1801// loads and stores near one another?
1802
Chad Rosier43f5c842015-08-05 12:40:13 +00001803/// createAArch64LoadStoreOptimizationPass - returns an instance of the
1804/// load / store optimization pass.
Tim Northover3b0846e2014-05-24 12:50:23 +00001805FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
1806 return new AArch64LoadStoreOpt();
1807}