//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store from unscaled generated");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a wider instruction.
  MachineBasicBlock::iterator
  mergeNarrowInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Is this a candidate for ld/st merging or pairing? For example, we don't
  // touch volatiles or load/stores that have a hint to avoid pair formation.
  bool isCandidateToMergeOrPair(MachineInstr *MI);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Check if converting two narrow loads into a single wider load with
  // bitfield extracts could be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

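// Return true if the given opcode uses the unscaled (LDUR/STUR-style)
// addressing form, which takes a signed 9-bit byte offset rather than a
// scaled unsigned immediate.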
static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

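// Return the bitfield-extract opcode used to split the result of a merged
// wide load: UBFM for the zero-extending narrow loads, SBFM for the
// sign-extending ones.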
static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

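// Return true for the 8- and 16-bit stores that this pass can merge into a
// single wider store.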
static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowStore(MachineInstr *MI) {
  return isNarrowStore(MI->getOpcode());
}

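// Return true for the 8- and 16-bit loads (zero- or sign-extending) that
// this pass can merge into a single wider load.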
static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

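// Map a sign-extending load to its non-sign-extending equivalent and return
// all other handled load/store opcodes unchanged. *IsValidLdStrOpc, if
// given, reports whether Opc was recognized at all.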
static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

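// Return the opcode that accesses twice the width of the given narrow
// load/store, i.e. the instruction a merged narrow pair is rewritten to.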
static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

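// Return the paired (LDP/STP) opcode corresponding to a single scaled or
// unscaled load/store opcode.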
static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

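// Return true if the store is at least as wide as the load and both use the
// same addressing form (scaled or unscaled), so the load could potentially
// be satisfied by the stored value.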
static unsigned isMatchingStore(MachineInstr *LoadInst,
                                MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

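// Return the pre-indexed (writeback) form of the given load/store opcode.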
static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

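// Return the post-indexed (writeback) form of the given load/store opcode.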
static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

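// Return true if this is a paired (LDP/STP) load or store.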
static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

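// Operand accessors. Paired instructions carry two register operands, so
// the base and offset operands sit one index further along than in the
// single-register forms.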
static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

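// Return true if the bytes the load reads lie entirely within the bytes the
// store writes, comparing unscaled byte offsets.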
static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

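// Merge two adjacent narrow accesses: two narrow loads become one wide load
// plus bitfield extracts of its halves; two narrow zero-stores become one
// wide store.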
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  unsigned Opc = I->getOpcode();
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(Paired).getImm() + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(getMatchingWideOpcode(Opc)))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm)
                   .setMemRefs(I->mergeMemRefsWith(*Paired));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  assert(isNarrowStore(Opc) && "Expected narrow store");
  // Change the scaled offset from small to large type.
  if (!IsUnscaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }
  MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(getMatchingWideOpcode(Opc)))
            .addOperand(getLdStRegOp(I))
            .addOperand(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*Paired));

  (void)MIB;

  DEBUG(dbgs() << "Creating wider load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();
  return NextI;
}

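// Combine two adjacent loads or stores into a single LDP/STP, materializing
// a sign extension afterwards when an LDRSW was rewritten into an LDP of W
// registers.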
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(Paired).getImm() + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }
  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
  // Handle Unscaled.
  if (IsUnscaled)
    OffsetImm /= OffsetStride;

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(getMatchingPairOpcode(Opc)))
            .addOperand(getLdStRegOp(RtMI))
            .addOperand(getLdStRegOp(Rt2MI))
            .addOperand(BaseRegOp)
            .addImm(OffsetImm);
  // FIXME: Copy the mem operands from the source instructions. The MI scheduler
  // needs these to reason about loads/stores.

  (void)MIB;

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

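// Forward the stored value to the load: same-size accesses become a plain
// register copy, narrower loads become a bitfield extract (AND/UBFM) of the
// stored register.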
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(LoadI);
  int StoreSize = getMemScale(StoreI);
  unsigned LdRt = getLdStRegOp(LoadI).getReg();
  unsigned StRt = getLdStRegOp(StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if its destination register matches the register
    // holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = isUnscaledLdSt(LoadI);
    assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | (Immr << 6)                 // immr
                                | (Imms << 0);                // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

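// Check that the scaled "element" offset fits the 7-bit signed immediate
// field of the paired instructions.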
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled)
    Offset /= OffsetStride;

  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

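// Conservative alias check: assume the two instructions may alias unless
// the target can prove their accesses are trivially disjoint.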
static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

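// Walk backwards from the load looking for the store that wrote the address
// it reads, giving up at calls, writes to the base register, or aliasing
// stores.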
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001083bool AArch64LoadStoreOpt::findMatchingStore(
1084 MachineBasicBlock::iterator I, unsigned Limit,
1085 MachineBasicBlock::iterator &StoreI) {
1086 MachineBasicBlock::iterator E = I->getParent()->begin();
1087 MachineBasicBlock::iterator MBBI = I;
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001088 MachineInstr *LoadMI = I;
1089 unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001090
1091 // Track which registers have been modified and used between the first insn
1092 // and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001093 ModifiedRegs.reset();
1094 UsedRegs.reset();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001095
Chad Rosier1142f3c2016-02-02 15:22:55 +00001096 // FIXME: We miss the case where the matching store is the first instruction
1097 // in the basic block.
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001098 for (unsigned Count = 0; MBBI != E && Count < Limit;) {
1099 --MBBI;
1100 MachineInstr *MI = MBBI;
1101 // Skip DBG_VALUE instructions. Otherwise debug info can affect the
1102 // optimization by changing how far we scan.
1103 if (MI->isDebugValue())
1104 continue;
1105 // Now that we know this is a real instruction, count it.
1106 ++Count;
1107
1108 // If the load instruction reads directly from the address to which the
1109 // store instruction writes and the stored value is not modified, we can
1110 // promote the load. Since we do not handle stores with pre-/post-index,
1111 // it's unnecessary to check if BaseReg is modified by the store itself.
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001112 if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001113 BaseReg == getLdStBaseOp(MI).getReg() &&
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001114 isLdOffsetInRangeOfSt(LoadMI, MI) &&
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001115 !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
1116 StoreI = MBBI;
1117 return true;
1118 }
1119
1120 if (MI->isCall())
1121 return false;
1122
1123 // Update modified / uses register lists.
1124 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1125
1126 // Otherwise, if the base register is modified, we have no match, so
1127 // return early.
1128 if (ModifiedRegs[BaseReg])
1129 return false;
1130
1131 // If we encounter a store aliased with the load, return early.
Chad Rosier5c6a66c2016-02-09 15:59:57 +00001132 if (MI->mayStore() && mayAlias(LoadMI, MI, TII))
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001133 return false;
1134 }
1135 return false;
1136}
1137
Tim Northover3b0846e2014-05-24 12:50:23 +00001138/// findMatchingInsn - Scan the instructions looking for a load/store that can
1139/// be combined with the current instruction into a load/store pair.
1140MachineBasicBlock::iterator
1141AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001142 LdStPairFlags &Flags, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001143 MachineBasicBlock::iterator E = I->getParent()->end();
1144 MachineBasicBlock::iterator MBBI = I;
1145 MachineInstr *FirstMI = I;
1146 ++MBBI;
1147
Matthias Braunfa3872e2015-05-18 20:27:55 +00001148 unsigned Opc = FirstMI->getOpcode();
Tilmann Scheller4aad3bd2014-06-04 12:36:28 +00001149 bool MayLoad = FirstMI->mayLoad();
Chad Rosier22eb7102015-08-06 17:37:18 +00001150 bool IsUnscaled = isUnscaledLdSt(FirstMI);
Chad Rosierf77e9092015-08-06 15:50:12 +00001151 unsigned Reg = getLdStRegOp(FirstMI).getReg();
1152 unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
1153 int Offset = getLdStOffsetOp(FirstMI).getImm();
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001154 bool IsNarrowStore = isNarrowStore(Opc);
1155
1156 // For narrow stores, find only the case where the stored value is WZR.
1157 if (IsNarrowStore && Reg != AArch64::WZR)
1158 return E;
Tim Northover3b0846e2014-05-24 12:50:23 +00001159
1160 // Early exit if the first instruction modifies the base register.
1161 // e.g., ldr x0, [x0]
Tim Northover3b0846e2014-05-24 12:50:23 +00001162 if (FirstMI->modifiesRegister(BaseReg, TRI))
1163 return E;
Chad Rosiercaed6db2015-08-10 17:17:19 +00001164
Jun Bum Lim1de2d442016-02-05 20:02:03 +00001165 // Early exit if the offset is not possible to match. (6 bits of positive
Chad Rosiercaed6db2015-08-10 17:17:19 +00001166 // range, plus allow an extra one in case we find a later insn that matches
1167 // with Offset-1)
Chad Rosierf11d0402015-10-01 18:17:12 +00001168 int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001169 if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
1170 !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
Tim Northover3b0846e2014-05-24 12:50:23 +00001171 return E;
1172
1173 // Track which registers have been modified and used between the first insn
1174 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001175 ModifiedRegs.reset();
1176 UsedRegs.reset();
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001177
1178 // Remember any instructions that read/write memory between FirstMI and MI.
1179 SmallVector<MachineInstr *, 4> MemInsns;
1180
Tim Northover3b0846e2014-05-24 12:50:23 +00001181 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
1182 MachineInstr *MI = MBBI;
1183 // Skip DBG_VALUE instructions. Otherwise debug info can affect the
1184 // optimization by changing how far we scan.
1185 if (MI->isDebugValue())
1186 continue;
1187
1188 // Now that we know this is a real instruction, count it.
1189 ++Count;
1190
Renato Golin6274e522016-02-05 12:14:30 +00001191 bool CanMergeOpc = Opc == MI->getOpcode();
Chad Rosier18896c02016-02-04 16:01:40 +00001192 Flags.setSExtIdx(-1);
Renato Golin6274e522016-02-05 12:14:30 +00001193 if (!CanMergeOpc) {
1194 bool IsValidLdStrOpc;
1195 unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
1196 assert(IsValidLdStrOpc &&
1197 "Given Opc should be a Load or Store with an immediate");
1198 // Opc will be the first instruction in the pair.
1199 Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
1200 CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
1201 }
1202
1203 if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
Chad Rosierc56a9132015-08-10 18:42:45 +00001204 assert(MI->mayLoadOrStore() && "Expected memory operation.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001205 // If we've found another instruction with the same opcode, check to see
1206 // if the base and offset are compatible with our starting instruction.
1207 // These instructions all have scaled immediate operands, so we just
1208 // check for +1/-1. Make sure to check the new instruction offset is
1209 // actually an immediate and not a symbolic reference destined for
1210 // a relocation.
1211 //
1212 // Pairwise instructions have a 7-bit signed offset field. Single insns
1213 // have a 12-bit unsigned offset field. To be a valid combine, the
1214 // final offset must be in range.
Chad Rosierf77e9092015-08-06 15:50:12 +00001215 unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
1216 int MIOffset = getLdStOffsetOp(MI).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001217 if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
1218 (Offset + OffsetStride == MIOffset))) {
1219 int MinOffset = Offset < MIOffset ? Offset : MIOffset;
1220 // If this is a volatile load/store that otherwise matched, stop looking
1221 // as something is going on that we don't have enough information to
1222 // safely transform. Similarly, stop if we see a hint to avoid pairs.
1223 if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
1224 return E;
1225 // If the resultant immediate offset of merging these instructions
1226 // is out of range for a pairwise instruction, bail and keep looking.
Renato Golin6274e522016-02-05 12:14:30 +00001227 bool MIIsUnscaled = isUnscaledLdSt(MI);
Jun Bum Limc12c2792015-11-19 18:41:27 +00001228 bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
1229 if (!IsNarrowLoad &&
Renato Golin6274e522016-02-05 12:14:30 +00001230 !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001231 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
Chad Rosierc56a9132015-08-10 18:42:45 +00001232 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001233 continue;
1234 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001235
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001236 if (IsNarrowLoad || IsNarrowStore) {
1237 // If the alignment requirements of the scaled wide load/store
1238 // instruction can't express the offset of the scaled narrow
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001239 // input, bail and keep looking.
1240 if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
1241 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1242 MemInsns.push_back(MI);
1243 continue;
1244 }
1245 } else {
1246 // If the alignment requirements of the paired (scaled) instruction
1247 // can't express the offset of the unscaled input, bail and keep
1248 // looking.
1249 if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
1250 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1251 MemInsns.push_back(MI);
1252 continue;
1253 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001254 }
1255 // If the destination register of the loads is the same register, bail
1256 // and keep looking. A load-pair instruction with both destination
1257 // registers the same is UNPREDICTABLE and will result in an exception.
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001258 // For narrow stores, allow only when the stored value is the same
1259 // (i.e., WZR).
1260 if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
1261 (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001262 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
Chad Rosierc56a9132015-08-10 18:42:45 +00001263 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001264 continue;
1265 }
1266
1267 // If the Rt of the second instruction was not modified or used between
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001268 // the two instructions and none of the instructions between the second
1269 // and first alias with the second, we can combine the second into the
1270 // first.
Chad Rosierf77e9092015-08-06 15:50:12 +00001271 if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
1272 !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001273 !mayAlias(MI, MemInsns, TII)) {
Chad Rosier96a18a92015-07-21 17:42:04 +00001274 Flags.setMergeForward(false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001275 return MBBI;
1276 }
1277
1278 // Likewise, if the Rt of the first instruction is not modified or used
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001279 // between the two instructions and none of the instructions between the
1280 // first and the second alias with the first, we can combine the first
1281 // into the second.
Chad Rosierf77e9092015-08-06 15:50:12 +00001282 if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
Chad Rosier5f668e12015-09-03 14:19:43 +00001283 !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001284 !mayAlias(FirstMI, MemInsns, TII)) {
Chad Rosier96a18a92015-07-21 17:42:04 +00001285 Flags.setMergeForward(true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001286 return MBBI;
1287 }
1288 // Unable to combine these instructions due to interference in between.
1289 // Keep looking.
1290 }
1291 }
1292
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001293 // If the instruction wasn't a matching load or store, stop searching if we
1294 // encounter a call instruction that might modify memory.
1295 if (MI->isCall())
Tim Northover3b0846e2014-05-24 12:50:23 +00001296 return E;
1297
1298 // Update modified / uses register lists.
1299 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1300
1301 // Otherwise, if the base register is modified, we have no match, so
1302 // return early.
1303 if (ModifiedRegs[BaseReg])
1304 return E;
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001305
1306 // Update list of instructions that read/write memory.
1307 if (MI->mayLoadOrStore())
1308 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001309 }
1310 return E;
1311}
1312
1313MachineBasicBlock::iterator
Chad Rosier2dfd3542015-09-23 13:51:44 +00001314AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1315 MachineBasicBlock::iterator Update,
1316 bool IsPreIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001317 assert((Update->getOpcode() == AArch64::ADDXri ||
1318 Update->getOpcode() == AArch64::SUBXri) &&
1319 "Unexpected base register update instruction to merge!");
1320 MachineBasicBlock::iterator NextI = I;
1321 // Return the instruction following the merged instruction, which is
1322 // the instruction following our unmerged load. Unless that's the add/sub
1323 // instruction we're merging, in which case it's the one after that.
1324 if (++NextI == Update)
1325 ++NextI;
1326
1327 int Value = Update->getOperand(2).getImm();
1328 assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
Chad Rosier2dfd3542015-09-23 13:51:44 +00001329 "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
Tim Northover3b0846e2014-05-24 12:50:23 +00001330 if (Update->getOpcode() == AArch64::SUBXri)
1331 Value = -Value;
1332
Chad Rosier2dfd3542015-09-23 13:51:44 +00001333 unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1334 : getPostIndexedOpcode(I->getOpcode());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001335 MachineInstrBuilder MIB;
1336 if (!isPairedLdSt(I)) {
1337 // Non-paired instruction.
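 // The writeback amount of a non-paired pre/post-indexed form is an
 // unscaled signed 9-bit byte offset, so Value is used as-is.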
1338 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1339 .addOperand(getLdStRegOp(Update))
1340 .addOperand(getLdStRegOp(I))
1341 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001342 .addImm(Value)
1343 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001344 } else {
1345 // Paired instruction.
Chad Rosier32d4d372015-09-29 16:07:32 +00001346 int Scale = getMemScale(I);
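 // Paired pre/post-indexed forms encode the writeback amount as a multiple
 // of the access size, so scale the byte offset down.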
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001347 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1348 .addOperand(getLdStRegOp(Update))
1349 .addOperand(getLdStRegOp(I, 0))
1350 .addOperand(getLdStRegOp(I, 1))
1351 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001352 .addImm(Value / Scale)
1353 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001354 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001355 (void)MIB;
1356
Chad Rosier2dfd3542015-09-23 13:51:44 +00001357 if (IsPreIdx)
1358 DEBUG(dbgs() << "Creating pre-indexed load/store.");
1359 else
1360 DEBUG(dbgs() << "Creating post-indexed load/store.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001361 DEBUG(dbgs() << " Replacing instructions:\n ");
1362 DEBUG(I->print(dbgs()));
1363 DEBUG(dbgs() << " ");
1364 DEBUG(Update->print(dbgs()));
1365 DEBUG(dbgs() << " with instruction:\n ");
1366 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1367 DEBUG(dbgs() << "\n");
1368
1369 // Erase the old instructions for the block.
1370 I->eraseFromParent();
1371 Update->eraseFromParent();
1372
1373 return NextI;
1374}
1375
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001376bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
1377 MachineInstr *MI,
1378 unsigned BaseReg, int Offset) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001379 switch (MI->getOpcode()) {
1380 default:
1381 break;
1382 case AArch64::SUBXri:
1383 // Negate the offset for a SUB instruction.
1384 Offset *= -1;
1385 // FALLTHROUGH
1386 case AArch64::ADDXri:
1387 // Make sure it's a vanilla immediate operand, not a relocation or
1388 // anything else we can't handle.
1389 if (!MI->getOperand(2).isImm())
1390 break;
1391 // Watch out for 1 << 12 shifted value.
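 // e.g., 'add x0, x0, #1, lsl #12' adds 4096, which is beyond any writeback
 // immediate this pass can fold.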
1392 if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
1393 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001394
1395 // The update instruction source and destination register must be the
1396 // same as the load/store base register.
1397 if (MI->getOperand(0).getReg() != BaseReg ||
1398 MI->getOperand(1).getReg() != BaseReg)
1399 break;
1400
1401 bool IsPairedInsn = isPairedLdSt(MemMI);
1402 int UpdateOffset = MI->getOperand(2).getImm();
1403 // For non-paired load/store instructions, the immediate must fit in a
1404 // signed 9-bit integer.
1405 if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
1406 break;
1407
1408 // For paired load/store instructions, the immediate must be a multiple of
1409 // the scaling factor. The scaled offset must also fit into a signed 7-bit
1410 // integer.
1411 if (IsPairedInsn) {
Chad Rosier32d4d372015-09-29 16:07:32 +00001412 int Scale = getMemScale(MemMI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001413 if (UpdateOffset % Scale != 0)
1414 break;
1415
1416 int ScaledOffset = UpdateOffset / Scale;
 1417 if (ScaledOffset > 63 || ScaledOffset < -64)
1418 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001419 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001420
 1421 // If we have a non-zero Offset, we check that it matches the amount
 1422 // we're adding to the register. A zero Offset matches any update amount.
1423 if (!Offset || Offset == MI->getOperand(2).getImm())
1424 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001425 break;
1426 }
1427 return false;
1428}
1429
1430MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001431 MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001432 MachineBasicBlock::iterator E = I->getParent()->end();
1433 MachineInstr *MemMI = I;
1434 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001435
Chad Rosierf77e9092015-08-06 15:50:12 +00001436 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
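 // Express the memory operand's immediate in bytes (scaled forms encode it
 // in units of the access size) so it can be compared with UnscaledOffset.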
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001437 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001438
Chad Rosierb7c5b912015-10-01 13:43:05 +00001439 // Scan forward looking for post-index opportunities. Updating instructions
1440 // can't be formed if the memory instruction doesn't have the offset we're
1441 // looking for.
1442 if (MIUnscaledOffset != UnscaledOffset)
1443 return E;
1444
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001445 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001446 // merge the update.
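 // e.g., 'ldr x0, [x0]' followed by 'add x0, x0, #8' cannot become
 // 'ldr x0, [x0], #8': the add consumes the loaded value, and writeback
 // with Rt == Rn is unpredictable anyway.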
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001447 bool IsPairedInsn = isPairedLdSt(MemMI);
1448 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1449 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1450 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1451 return E;
1452 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001453
Tim Northover3b0846e2014-05-24 12:50:23 +00001454 // Track which registers have been modified and used between the first insn
1455 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001456 ModifiedRegs.reset();
1457 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001458 ++MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001459 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001460 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001461 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001462 if (MI->isDebugValue())
1463 continue;
1464
Chad Rosier35706ad2016-02-04 21:26:02 +00001465 // Now that we know this is a real instruction, count it.
1466 ++Count;
1467
Tim Northover3b0846e2014-05-24 12:50:23 +00001468 // If we found a match, return it.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001469 if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001470 return MBBI;
1471
1472 // Update the status of what the instruction clobbered and used.
1473 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1474
1475 // Otherwise, if the base register is used or modified, we have no match, so
1476 // return early.
1477 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1478 return E;
1479 }
1480 return E;
1481}
1482
1483MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001484 MachineBasicBlock::iterator I, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001485 MachineBasicBlock::iterator B = I->getParent()->begin();
1486 MachineBasicBlock::iterator E = I->getParent()->end();
1487 MachineInstr *MemMI = I;
1488 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001489
Chad Rosierf77e9092015-08-06 15:50:12 +00001490 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1491 int Offset = getLdStOffsetOp(MemMI).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001492
1493 // If the load/store is the first instruction in the block, there's obviously
1494 // not any matching update. Ditto if the memory offset isn't zero.
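 // (The update's offset becomes the pre-index writeback amount, so a
 // nonzero memory offset here has nowhere to go.)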
1495 if (MBBI == B || Offset != 0)
1496 return E;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001497 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001498 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001499 bool IsPairedInsn = isPairedLdSt(MemMI);
1500 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1501 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1502 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1503 return E;
1504 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001505
1506 // Track which registers have been modified and used between the first insn
1507 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001508 ModifiedRegs.reset();
1509 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001510 --MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001511 for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001512 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001513 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001514 if (MI->isDebugValue())
1515 continue;
1516
Chad Rosier35706ad2016-02-04 21:26:02 +00001517 // Now that we know this is a real instruction, count it.
1518 ++Count;
1519
Tim Northover3b0846e2014-05-24 12:50:23 +00001520 // If we found a match, return it.
Chad Rosier11c825f2015-09-30 19:44:40 +00001521 if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001522 return MBBI;
1523
1524 // Update the status of what the instruction clobbered and used.
1525 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1526
1527 // Otherwise, if the base register is used or modified, we have no match, so
1528 // return early.
1529 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1530 return E;
1531 }
1532 return E;
1533}
1534
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001535bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
1536 MachineBasicBlock::iterator &MBBI) {
1537 MachineInstr *MI = MBBI;
1538 // If this is a volatile load, don't mess with it.
1539 if (MI->hasOrderedMemoryRef())
1540 return false;
1541
1542 // Make sure this is a reg+imm.
1543 // FIXME: It is possible to extend it to handle reg+reg cases.
1544 if (!getLdStOffsetOp(MI).isImm())
1545 return false;
1546
Chad Rosier35706ad2016-02-04 21:26:02 +00001547 // Look backward up to LdStLimit instructions.
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001548 MachineBasicBlock::iterator StoreI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001549 if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001550 ++NumLoadsFromStoresPromoted;
1551 // Promote the load. Keeping the iterator straight is a
1552 // pain, so we let the merge routine tell us what the next instruction
1553 // is after it's done mucking about.
1554 MBBI = promoteLoadFromStore(MBBI, StoreI);
1555 return true;
1556 }
1557 return false;
1558}
1559
Chad Rosier24c46ad2016-02-09 18:10:20 +00001560bool AArch64LoadStoreOpt::isCandidateToMergeOrPair(MachineInstr *MI) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001561 // If this is a volatile load/store, don't mess with it.
1562 if (MI->hasOrderedMemoryRef())
1563 return false;
1564
1565 // Make sure this is a reg+imm (as opposed to an address reloc).
1566 if (!getLdStOffsetOp(MI).isImm())
1567 return false;
1568
1569 // Check if this load/store has a hint to avoid pair formation.
1570 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1571 if (TII->isLdStPairSuppressed(MI))
1572 return false;
1573
Chad Rosier24c46ad2016-02-09 18:10:20 +00001574 return true;
1575}
1576
1577// Find narrow loads that can be converted into a single wider load with
1578// bitfield extract instructions. Also merge adjacent zero stores into a wider
1579// store.
1580bool AArch64LoadStoreOpt::tryToMergeLdStInst(
1581 MachineBasicBlock::iterator &MBBI) {
1582 assert((isNarrowLoad(MBBI) || isNarrowStore(MBBI)) && "Expected narrow op.");
1583 MachineInstr *MI = MBBI;
1584 MachineBasicBlock::iterator E = MI->getParent()->end();
1585
1586 if (!isCandidateToMergeOrPair(MI))
1587 return false;
1588
1589 // Look ahead up to LdStLimit instructions for a mergable instruction.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001590 LdStPairFlags Flags;
Chad Rosier35706ad2016-02-04 21:26:02 +00001591 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001592 if (Paired != E) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001593 if (isNarrowLoad(MI)) {
1594 ++NumNarrowLoadsPromoted;
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001595 } else if (isNarrowStore(MI)) {
1596 ++NumZeroStoresPromoted;
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001597 }
Chad Rosier24c46ad2016-02-09 18:10:20 +00001598 // Keeping the iterator straight is a pain, so we let the merge routine tell
1599 // us what the next instruction is after it's done mucking about.
Chad Rosierb5933d72016-02-09 19:02:12 +00001600 MBBI = mergeNarrowInsns(MBBI, Paired, Flags);
Chad Rosier24c46ad2016-02-09 18:10:20 +00001601 return true;
1602 }
1603 return false;
1604}
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001605
Chad Rosier24c46ad2016-02-09 18:10:20 +00001606// Find loads and stores that can be merged into a single load or store pair
1607// instruction.
1608bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
1609 MachineInstr *MI = MBBI;
1610 MachineBasicBlock::iterator E = MI->getParent()->end();
1611
1612 if (!isCandidateToMergeOrPair(MI))
1613 return false;
1614
1615 // Look ahead up to LdStLimit instructions for a pairable instruction.
1616 LdStPairFlags Flags;
1617 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
1618 if (Paired != E) {
1619 ++NumPairCreated;
1620 if (isUnscaledLdSt(MI))
1621 ++NumUnscaledPairCreated;
1622 // Keeping the iterator straight is a pain, so we let the merge routine tell
1623 // us what the next instruction is after it's done mucking about.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001624 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1625 return true;
1626 }
1627 return false;
1628}
1629
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001630bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
1631 bool enableNarrowLdOpt) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001632 bool Modified = false;
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001633 // Four transformations to do here:
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001634 // 1) Find loads that directly read from stores and promote them by
1635 // replacing with mov instructions. If the store is wider than the load,
1636 // the load will be replaced with a bitfield extract.
1637 // e.g.,
1638 // str w1, [x0, #4]
1639 // ldrh w2, [x0, #6]
1640 // ; becomes
1641 // str w1, [x0, #4]
1642 // lsr w2, w1, #16
Tim Northover3b0846e2014-05-24 12:50:23 +00001643 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001644 MBBI != E;) {
1645 MachineInstr *MI = MBBI;
1646 switch (MI->getOpcode()) {
1647 default:
1648 // Just move on to the next instruction.
1649 ++MBBI;
1650 break;
1651 // Scaled instructions.
1652 case AArch64::LDRBBui:
1653 case AArch64::LDRHHui:
1654 case AArch64::LDRWui:
1655 case AArch64::LDRXui:
1656 // Unscaled instructions.
1657 case AArch64::LDURBBi:
1658 case AArch64::LDURHHi:
1659 case AArch64::LDURWi:
1660 case AArch64::LDURXi: {
1661 if (tryToPromoteLoadFromStore(MBBI)) {
1662 Modified = true;
1663 break;
1664 }
1665 ++MBBI;
1666 break;
1667 }
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001668 }
1669 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001670 // 2) Find narrow loads that can be converted into a single wider load
1671 // with bitfield extract instructions.
1672 // e.g.,
1673 // ldrh w0, [x2]
1674 // ldrh w1, [x2, #2]
1675 // ; becomes
1676 // ldr w0, [x2]
1677 // ubfx w1, w0, #16, #16
1678 // and w0, w0, #ffff
Jun Bum Lim1de2d442016-02-05 20:02:03 +00001679 //
1680 // Also merge adjacent zero stores into a wider store.
1681 // e.g.,
1682 // strh wzr, [x0]
1683 // strh wzr, [x0, #2]
1684 // ; becomes
1685 // str wzr, [x0]
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001686 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001687 enableNarrowLdOpt && MBBI != E;) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001688 MachineInstr *MI = MBBI;
1689 switch (MI->getOpcode()) {
1690 default:
1691 // Just move on to the next instruction.
1692 ++MBBI;
1693 break;
1694 // Scaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001695 case AArch64::LDRBBui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001696 case AArch64::LDRHHui:
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001697 case AArch64::LDRSBWui:
1698 case AArch64::LDRSHWui:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001699 case AArch64::STRBBui:
1700 case AArch64::STRHHui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001701 // Unscaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001702 case AArch64::LDURBBi:
1703 case AArch64::LDURHHi:
1704 case AArch64::LDURSBWi:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001705 case AArch64::LDURSHWi:
1706 case AArch64::STURBBi:
1707 case AArch64::STURHHi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001708 if (tryToMergeLdStInst(MBBI)) {
1709 Modified = true;
1710 break;
1711 }
1712 ++MBBI;
1713 break;
1714 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001715 }
1716 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001717 // 3) Find loads and stores that can be merged into a single load or store
1718 // pair instruction.
1719 // e.g.,
1720 // ldr x0, [x2]
1721 // ldr x1, [x2, #8]
1722 // ; becomes
1723 // ldp x0, x1, [x2]
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001724 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Tim Northover3b0846e2014-05-24 12:50:23 +00001725 MBBI != E;) {
1726 MachineInstr *MI = MBBI;
1727 switch (MI->getOpcode()) {
1728 default:
1729 // Just move on to the next instruction.
1730 ++MBBI;
1731 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001732 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001733 case AArch64::STRSui:
1734 case AArch64::STRDui:
1735 case AArch64::STRQui:
1736 case AArch64::STRXui:
1737 case AArch64::STRWui:
1738 case AArch64::LDRSui:
1739 case AArch64::LDRDui:
1740 case AArch64::LDRQui:
1741 case AArch64::LDRXui:
1742 case AArch64::LDRWui:
Quentin Colombet29f55332015-01-24 01:25:54 +00001743 case AArch64::LDRSWui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001744 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001745 case AArch64::STURSi:
1746 case AArch64::STURDi:
1747 case AArch64::STURQi:
1748 case AArch64::STURWi:
1749 case AArch64::STURXi:
1750 case AArch64::LDURSi:
1751 case AArch64::LDURDi:
1752 case AArch64::LDURQi:
1753 case AArch64::LDURWi:
Quentin Colombet29f55332015-01-24 01:25:54 +00001754 case AArch64::LDURXi:
1755 case AArch64::LDURSWi: {
Chad Rosier24c46ad2016-02-09 18:10:20 +00001756 if (tryToPairLdStInst(MBBI)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001757 Modified = true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001758 break;
1759 }
1760 ++MBBI;
1761 break;
1762 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001763 }
1764 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001765 // 4) Find base register updates that can be merged into the load or store
1766 // as a base-reg writeback.
1767 // e.g.,
1768 // ldr x0, [x2]
1769 // add x2, x2, #4
1770 // ; becomes
1771 // ldr x0, [x2], #4
Tim Northover3b0846e2014-05-24 12:50:23 +00001772 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1773 MBBI != E;) {
1774 MachineInstr *MI = MBBI;
1775 // Do update merging. It's simpler to keep this separate from the above
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001776 // switches, though not strictly necessary.
Matthias Braunfa3872e2015-05-18 20:27:55 +00001777 unsigned Opc = MI->getOpcode();
Tim Northover3b0846e2014-05-24 12:50:23 +00001778 switch (Opc) {
1779 default:
1780 // Just move on to the next instruction.
1781 ++MBBI;
1782 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001783 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001784 case AArch64::STRSui:
1785 case AArch64::STRDui:
1786 case AArch64::STRQui:
1787 case AArch64::STRXui:
1788 case AArch64::STRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001789 case AArch64::STRHHui:
1790 case AArch64::STRBBui:
Tim Northover3b0846e2014-05-24 12:50:23 +00001791 case AArch64::LDRSui:
1792 case AArch64::LDRDui:
1793 case AArch64::LDRQui:
1794 case AArch64::LDRXui:
1795 case AArch64::LDRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001796 case AArch64::LDRHHui:
1797 case AArch64::LDRBBui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001798 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001799 case AArch64::STURSi:
1800 case AArch64::STURDi:
1801 case AArch64::STURQi:
1802 case AArch64::STURWi:
1803 case AArch64::STURXi:
1804 case AArch64::LDURSi:
1805 case AArch64::LDURDi:
1806 case AArch64::LDURQi:
1807 case AArch64::LDURWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001808 case AArch64::LDURXi:
1809 // Paired instructions.
1810 case AArch64::LDPSi:
Chad Rosier43150122015-09-29 20:39:55 +00001811 case AArch64::LDPSWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001812 case AArch64::LDPDi:
1813 case AArch64::LDPQi:
1814 case AArch64::LDPWi:
1815 case AArch64::LDPXi:
1816 case AArch64::STPSi:
1817 case AArch64::STPDi:
1818 case AArch64::STPQi:
1819 case AArch64::STPWi:
1820 case AArch64::STPXi: {
Tim Northover3b0846e2014-05-24 12:50:23 +00001821 // Make sure this is a reg+imm (as opposed to an address reloc).
Chad Rosierf77e9092015-08-06 15:50:12 +00001822 if (!getLdStOffsetOp(MI).isImm()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001823 ++MBBI;
1824 break;
1825 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001826 // Look forward to try to form a post-index instruction. For example,
1827 // ldr x0, [x20]
1828 // add x20, x20, #32
1829 // merged into:
1830 // ldr x0, [x20], #32
Tim Northover3b0846e2014-05-24 12:50:23 +00001831 MachineBasicBlock::iterator Update =
Chad Rosier35706ad2016-02-04 21:26:02 +00001832 findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001833 if (Update != E) {
1834 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001835 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001836 Modified = true;
1837 ++NumPostFolded;
1838 break;
1839 }
1840 // Don't know how to handle pre/post-index versions, so move to the next
1841 // instruction.
Chad Rosier22eb7102015-08-06 17:37:18 +00001842 if (isUnscaledLdSt(Opc)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001843 ++MBBI;
1844 break;
1845 }
1846
1847 // Look back to try to find a pre-index instruction. For example,
1848 // add x0, x0, #8
1849 // ldr x1, [x0]
1850 // merged into:
1851 // ldr x1, [x0, #8]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001852 Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001853 if (Update != E) {
1854 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001855 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001856 Modified = true;
1857 ++NumPreFolded;
1858 break;
1859 }
Chad Rosier7a83d772015-10-01 13:09:44 +00001860 // The immediate in the load/store is scaled by the size of the memory
1861 // operation. The immediate in the add we're looking for,
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001862 // however, is not, so adjust here.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001863 int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001864
Tim Northover3b0846e2014-05-24 12:50:23 +00001865 // Look forward to try to find a pre-index instruction. For example,
1866 // ldr x1, [x0, #64]
1867 // add x0, x0, #64
1868 // merged into:
1869 // ldr x1, [x0, #64]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001870 Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001871 if (Update != E) {
1872 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001873 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001874 Modified = true;
1875 ++NumPreFolded;
1876 break;
1877 }
1878
1879 // Nothing found. Just move to the next instruction.
1880 ++MBBI;
1881 break;
1882 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001883 }
1884 }
1885
1886 return Modified;
1887}
1888
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001889bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001890 bool ProfitableArch = Subtarget->isCortexA57();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001891 // FIXME: The benefit from converting narrow loads into a wider load could be
1892 // microarchitectural as it assumes that a single load with two bitfield
1893 // extracts is cheaper than two narrow loads. Currently, this conversion is
1894 // enabled only in cortex-a57 on which performance benefits were verified.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001895 return ProfitableArch && !Subtarget->requiresStrictAlign();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001896}
1897
Tim Northover3b0846e2014-05-24 12:50:23 +00001898bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
Oliver Stannardd414c992015-11-10 11:04:18 +00001899 Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
1900 TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
1901 TRI = Subtarget->getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +00001902
Chad Rosierbba881e2016-02-02 15:02:30 +00001903 // Resize the modified and used register bitfield trackers. We do this once
1904 // per function and then clear the bitfield each time we optimize a load or
1905 // store.
1906 ModifiedRegs.resize(TRI->getNumRegs());
1907 UsedRegs.resize(TRI->getNumRegs());
1908
Tim Northover3b0846e2014-05-24 12:50:23 +00001909 bool Modified = false;
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001910 bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
Tim Northover3b0846e2014-05-24 12:50:23 +00001911 for (auto &MBB : Fn)
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001912 Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001913
1914 return Modified;
1915}
1916
1917// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
1918// loads and stores near one another?
1919
Chad Rosier43f5c842015-08-05 12:40:13 +00001920/// createAArch64LoadStoreOptimizationPass - returns an instance of the
1921/// load / store optimization pass.
Tim Northover3b0846e2014-05-24 12:50:23 +00001922FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
1923 return new AArch64LoadStoreOpt();
1924}