//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"
/// AArch64LoadStoreOpt - Post-register allocation pass to combine
/// load / store instructions to form ldp / stp instructions.

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
};

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Check if converting two narrow loads into a single wider load with
  // bitfield extracts could be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowStore(MachineInstr *MI) {
  return isNarrowStore(MI->getOpcode());
}

static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}
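// A load can be promoted from a store when the store writes at least as many
// bytes as the load reads using the same addressing form (scaled with scaled,
// unscaled with unscaled); the base register and offsets are checked by the
// caller.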
static bool isMatchingStore(MachineInstr *LoadInst,
                            MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

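// Operand layout assumed by the accessors below: unpaired memory ops are
// (Rt, base, offset); paired ops are (Rt, Rt2, base, offset).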
static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

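// Example: STRWui %w1, %x0, #1 stores 4 bytes at unscaled byte offset 4 (the
// immediate is scaled by the 4-byte access size), so it covers an LDRBBui of
// %x0 at unscaled offsets 4 through 7.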
static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  unsigned NewOpc = getMatchingPairOpcode(Opc);
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  int Offset = getLdStOffsetOp(I).getImm();
  int PairedOffset = getLdStOffsetOp(Paired).getImm();
  bool PairedIsUnscaled = isUnscaledLdSt(Paired->getOpcode());

  // We're trying to pair instructions that differ in how they are scaled.
  // If I is scaled then scale the offset of Paired accordingly.
  // Otherwise, do the opposite (i.e., make Paired's offset unscaled).
  if (IsUnscaled != PairedIsUnscaled) {
    int MemSize = getMemScale(Paired);
    assert(!(PairedOffset % getMemScale(Paired)) &&
           "Offset should be a multiple of the stride!");
    PairedOffset =
        PairedIsUnscaled ? PairedOffset / MemSize : PairedOffset * MemSize;
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();

  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(NewOpc))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm)
                   .setMemRefs(I->mergeMemRefsWith(*Paired));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
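    // The wide load now holds both narrow values. Below, the low half is
    // recovered by extracting bits [Width-1:0] and the high half by
    // extracting bits [2*Width-1:Width]; e.g., for two LDRHHui merged into
    // an LDRWui, that is UBFM Wd, Wn, #0, #15 and UBFM Wd, Wn, #16, #31.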
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "  ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "  ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  if (isNarrowStore(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(NewOpc))
              .addOperand(getLdStRegOp(I))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm)
              .setMemRefs(I->mergeMemRefsWith(*Paired));
  } else {
    // Scale the immediate offset, if necessary.
    if (isUnscaledLdSt(RtMI->getOpcode())) {
      assert(!(OffsetImm % getMemScale(RtMI)) &&
             "Offset should be a multiple of the stride!");
      OffsetImm /= getMemScale(RtMI);
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(NewOpc))
              .addOperand(getLdStRegOp(RtMI))
              .addOperand(getLdStRegOp(Rt2MI))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm);
  }

  (void)MIB;

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}
864
Jun Bum Lim6755c3b2015-12-22 16:36:16 +0000865MachineBasicBlock::iterator
866AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
867 MachineBasicBlock::iterator StoreI) {
868 MachineBasicBlock::iterator NextI = LoadI;
869 ++NextI;
870
871 int LoadSize = getMemScale(LoadI);
872 int StoreSize = getMemScale(StoreI);
873 unsigned LdRt = getLdStRegOp(LoadI).getReg();
874 unsigned StRt = getLdStRegOp(StoreI).getReg();
875 bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
876
877 assert((IsStoreXReg ||
878 TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
879 "Unexpected RegClass");
880
881 MachineInstr *BitExtMI;
882 if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if its destination register is the same as the
    // register holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = isUnscaledLdSt(LoadI);
    assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
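      // The load reads exactly the low Width bits of the stored register, so
      // AND with a logical-immediate mask of Width ones, packed into the
      // N:immr:imms encoding expected by ANDXri/ANDWri.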
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | (Immr << 6)                 // immr
                                | (Imms << 0);                // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "  ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "  ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
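  // Paired instructions encode a 7-bit signed scaled offset (imm7), hence
  // the [-64, 63] element range.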
  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
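// e.g., alignTo(3, 4) == 4, alignTo(8, 4) == 8, alignTo(-3, 4) == 0
// (rounds up toward positive infinity).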
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator E = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // FIXME: We miss the case where the matching store is the first instruction
  // in the basic block.
  for (unsigned Count = 0; MBBI != E && Count < Limit;) {
    --MBBI;
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;
    // Now that we know this is a real instruction, count it.
    ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI->mayStore() && isMatchingStore(FirstMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(FirstMI, MI) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI->isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI->mayStore() && mayAlias(FirstMI, MI, TII))
      return false;
  }
  return false;
}

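// Returns true if Opc and PairOpc can be merged: either the opcodes match
// exactly, they form a sign-/zero-extended pair (e.g., LDRSWui with LDRWui,
// recording in Flags which result needs sign extension), or they are the
// scaled and unscaled variants of the same operation.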
static bool canMergeOpc(unsigned Opc, unsigned PairOpc, LdStPairFlags &Flags) {
  // Opcodes match: nothing more to check.
  if (Opc == PairOpc)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // Opc will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(PairOpc, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
    return true;
  }

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?

  // If the second instruction isn't even a load/store, bail out.
  if (!PairIsValidLdStrOpc)
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return isUnscaledLdSt(Opc) != isUnscaledLdSt(PairOpc) &&
         getMatchingPairOpcode(Opc) == getMatchingPairOpcode(PairOpc);
}

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  bool IsNarrowStore = isNarrowStore(Opc);

  // For narrow stores, find only the case where the stored value is WZR.
  if (IsNarrowStore && Reg != AArch64::WZR)
    return E;

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1.)
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    Flags.setSExtIdx(-1);
    if (canMergeOpc(Opc, MI->getOpcode(), Flags) &&
        getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();

      // We're trying to pair instructions that differ in how they are scaled.
      // If FirstMI is scaled then scale the offset of MI accordingly.
      // Otherwise, do the opposite (i.e., make MI's offset unscaled).
      bool MIIsUnscaled = isUnscaledLdSt(MI);
      if (IsUnscaled != MIIsUnscaled) {
        int MemSize = getMemScale(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize)
            continue;
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }

      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop
        // looking as something is going on that we don't have enough
        // information to safely transform. Similarly, stop if we see a hint
        // to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;
        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
        if (!IsNarrowLoad &&
            !inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        if (IsNarrowLoad || IsNarrowStore) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow
          // input, bail and keep looking.
          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        } else {
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        // For narrow stores, allow only when the stored value is the same
        // (i.e., WZR).
        if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
            (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between
        // the first and the second alias with the first, we can combine the
        // first into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

Chad Rosierce8e5ab2015-05-21 21:36:46 +00001276 // If the instruction wasn't a matching load or store. Stop searching if we
1277 // encounter a call instruction that might modify memory.
1278 if (MI->isCall())
Tim Northover3b0846e2014-05-24 12:50:23 +00001279 return E;
1280
1281 // Update modified / uses register lists.
1282 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1283
1284 // Otherwise, if the base register is modified, we have no match, so
1285 // return early.
1286 if (ModifiedRegs[BaseReg])
1287 return E;
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001288
1289 // Update list of instructions that read/write memory.
1290 if (MI->mayLoadOrStore())
1291 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001292 }
1293 return E;
1294}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;
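  // E.g. (illustrative): a following "sub x2, x2, #16" is folded as an
  // update of -16, so a zero-offset "ldr x0, [x2]" becomes the post-indexed
  // "ldr x0, [x2], #-16".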

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  } else {
    // Paired instruction.
    int Scale = getMemScale(I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I, 0))
              .addOperand(getLdStRegOp(I, 1))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  }
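  // For example (illustrative): folding "add x2, x2, #16" into
  // "ldp x0, x1, [x2]" yields "ldp x0, x1, [x2], #16"; the immediate
  // actually encoded is Value / Scale == 16 / 8 == 2, because the paired
  // forms use a scaled immediate.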
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
                                               MachineInstr *MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
    // Negate the offset for a SUB instruction.
    Offset *= -1;
    // FALLTHROUGH
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI->getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI->getOperand(0).getReg() != BaseReg ||
        MI->getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI->getOperand(2).getImm();
    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor. The scaled offset must also fit into a signed 7-bit
    // integer, i.e. [-64, 63].
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }
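    // Sketch of the accepted ranges (illustrative): "add x2, x2, #255" can
    // fold into "ldr x0, [x2]" because 255 fits the signed 9-bit range
    // [-256, 255], while for "ldp x0, x1, [x2]" an update of #264 folds
    // because 264 / 8 == 33 fits the signed 7-bit scaled range [-64, 63].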

    // If we have a non-zero Offset, check that it matches the amount we're
    // adding to the register. A zero Offset acts as a wildcard and matches
    // any update amount.
    if (!Offset || Offset == MI->getOperand(2).getImm())
      return true;
    break;
  }
  return false;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, int UnscaledOffset) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. An update instruction
  // can't be folded if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;
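  // For example (illustrative): when called with UnscaledOffset == 0, only a
  // zero-offset access such as "ldr x0, [x20]" qualifies, since only it can
  // absorb a later "add x20, x20, #32" as the post-indexed
  // "ldr x0, [x20], #32".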

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();
  ++MBBI;
  for (; MBBI != E; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions.
    if (MI->isDebugValue())
      continue;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
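  // Illustrative example: with a zero offset,
  //   add x0, x0, #8
  //   ldr x1, [x0]
  // can merge into "ldr x1, [x0, #8]!". With a non-zero offset (say
  // "ldr x1, [x0, #4]"), no single pre-indexed form could both load from
  // x0+12 and leave x0 updated by only #8, so we bail out above.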
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();
  --MBBI;
  for (; MBBI != B; --MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions.
    if (MI->isDebugValue())
      continue;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = MBBI;
  // If this is a volatile load, don't mess with it.
  if (MI->hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to ScanLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, ScanLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::tryToMergeLdStInst(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = MBBI;
  MachineBasicBlock::iterator E = MI->getParent()->end();
  // If this is a volatile load/store, don't mess with it.
  if (MI->hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (TII->isLdStPairSuppressed(MI))
    return false;

  // Look ahead up to ScanLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
  if (Paired != E) {
    if (isNarrowLoad(MI)) {
      ++NumNarrowLoadsPromoted;
    } else if (isNarrowStore(MI)) {
      ++NumZeroStoresPromoted;
    } else {
      ++NumPairCreated;
      if (isUnscaledLdSt(MI))
        ++NumUnscaledPairCreated;
    }

    // Merge the loads into a pair. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool enableNarrowLdOpt) {
  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRWui:
    case AArch64::LDRXui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURWi:
    case AArch64::LDURXi: {
      if (tryToPromoteLoadFromStore(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
    }
  }
  // 2) Find narrow loads that can be converted into a single wider load
  //    with bitfield extract instructions.
  //      e.g.,
  //        ldrh w0, [x2]
  //        ldrh w1, [x2, #2]
  //        ; becomes
  //        ldr w0, [x2]
  //        ubfx w1, w0, #16, #16
  //        and w0, w0, #ffff
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       enableNarrowLdOpt && MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRSBWui:
    case AArch64::LDRSHWui:
    case AArch64::STRBBui:
    case AArch64::STRHHui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURSBWi:
    case AArch64::LDURSHWi:
    case AArch64::STURBBi:
    case AArch64::STURHHi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
    }
  }
  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRSWui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    case AArch64::LDURSWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
    }
  }
  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switches, though not strictly necessary.
    unsigned Opc = MI->getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::STRHHui:
    case AArch64::STRBBui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRHHui:
    case AArch64::LDRBBui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    // Paired instructions.
    case AArch64::LDPSi:
    case AArch64::LDPSWi:
    case AArch64::LDPDi:
    case AArch64::LDPQi:
    case AArch64::LDPWi:
    case AArch64::LDPXi:
    case AArch64::STPSi:
    case AArch64::STPDi:
    case AArch64::STPQi:
    case AArch64::STPWi:
    case AArch64::STPXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Look forward to try to form a post-index instruction. For example,
      //   ldr x0, [x20]
      //   add x20, x20, #32
      // merged into:
      //   ldr x0, [x20], #32
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, 0);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
        Modified = true;
        ++NumPostFolded;
        break;
      }
      // Don't know how to handle pre/post-index versions, so move to the next
      // instruction.
      if (isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      //   add x0, x0, #8
      //   ldr x1, [x0]
      // merged into:
      //   ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }
      // The immediate in the load/store is scaled by the size of the memory
      // operation. The immediate in the add we're looking for,
      // however, is not, so adjust here.
      int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
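      // For example (illustrative): "ldr x1, [x0, #64]" carries an encoded
      // immediate of 8 with getMemScale(MI) == 8, so UnscaledOffset is the
      // byte offset 64, which can then match the "#64" in a following add.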

      // Look forward for an update instruction we can merge as a pre-index
      // writeback. For example,
      //   ldr x1, [x0, #64]
      //   add x0, x0, #64
      // merged into:
      //   ldr x1, [x0, #64]!
      Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
    }
  }

  return Modified;
}

bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
  bool ProfitableArch = Subtarget->isCortexA57();
  // FIXME: The benefit from converting narrow loads into a wider load is
  // largely microarchitectural, as it assumes that a single load with two
  // bitfield extracts is cheaper than two narrow loads. Currently, this
  // conversion is enabled only on Cortex-A57, where the performance benefits
  // were verified.
  return ProfitableArch && !Subtarget->requiresStrictAlign();
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();

  // Resize the modified and used register bitfield trackers. We do this once
  // per function and then clear the bitfield each time we optimize a load or
  // store.
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());

  bool Modified = false;
  bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, enableNarrowLdOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
// loads and stores near one another?

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}