//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

/// AArch64LoadStoreOpt - Post-register allocation pass to combine
/// load / store instructions to form ldp / stp instructions.

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");

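// Limit on the number of real (non-debug) instructions scanned when looking
// for a pairable or base-register-updating instruction.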
static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
};

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);
  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I, unsigned Limit,
                                int UnscaledOffset);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Check if converting two narrow loads into a single wider load with
  // bitfield extracts could be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

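// Note: UBFMWri/SBFMWri are the underlying forms of the ubfx/sbfx aliases.
// For example, extracting the high halfword of W0 into W1 is
// "ubfx w1, w0, #16, #16", i.e. UBFMWri w1, w0, 16, 31
// (immr = lsb, imms = lsb + width - 1).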
static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}

// Scaling factor for a scaled or unscaled load or store, i.e. the width of
// the memory access in bytes.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

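// Map a load/store opcode to its paired equivalent. Note that narrow loads
// (byte/halfword) map to a single wider load rather than a load-pair
// instruction; the individual results are recovered later with bitfield
// extracts.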
static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

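// Operand layout assumed by the accessors below: a paired instruction is
// (Rt, Rt2, base, imm) and a single load/store is (Rt, base, imm). For
// example, in "ldp x0, x1, [x2, #16]" operands 0 and 1 are the destination
// registers, operand 2 the base register, and operand 3 the scaled immediate.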
static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

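// Keeping the MachineMemOperands on the merged instruction preserves the
// memory-access information (size, alignment, alias info) that later passes,
// such as the post-RA scheduler, can use to reason about the merged access.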
// Copy MachineMemOperands from Op0 and Op1 to a new array assigned to MI.
static void concatenateMemOperands(MachineInstr *MI, MachineInstr *Op0,
                                   MachineInstr *Op1) {
  assert(MI->memoperands_empty() && "expected a new machineinstr");
  size_t numMemRefs = (Op0->memoperands_end() - Op0->memoperands_begin()) +
                      (Op1->memoperands_end() - Op1->memoperands_begin());

  MachineFunction *MF = MI->getParent()->getParent();
  MachineSDNode::mmo_iterator MemBegin = MF->allocateMemRefsArray(numMemRefs);
  MachineSDNode::mmo_iterator MemEnd =
      std::copy(Op0->memoperands_begin(), Op0->memoperands_end(), MemBegin);
  MemEnd = std::copy(Op1->memoperands_begin(), Op1->memoperands_end(), MemEnd);
  MI->setMemRefs(MemBegin, MemEnd);
}

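// An illustrative example of the sign-extend handling below: merging
//   ldrsw x0, [x2]
//   ldr   w1, [x2, #4]
// yields
//   ldp  w0, w1, [x2]
//   sbfm x0, x0, #0, #31   // i.e. sxtw x0, w0
// with an implicit-def KILL of w0/x0 inserted to satisfy the machine
// verifier.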
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  unsigned NewOpc = getMatchingPairOpcode(Opc);
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(Paired).getImm() + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();

  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(NewOpc))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm);

    // Copy MachineMemOperands from the original loads.
    concatenateMemOperands(NewMemMI, I, Paired);

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
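    // For a pair of byte loads merged into a halfword load, Width is 8: the
    // low half is bits [7:0] (lsb 0, imms 7) and the high half bits [15:8]
    // (lsb 8, imms 15).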
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // For unscaled instructions, convert the byte offset into the element
  // offset used by the paired instruction.
  if (IsUnscaled)
    OffsetImm /= OffsetStride;

  // Construct the new instruction.
  MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
                                    I->getDebugLoc(), TII->get(NewOpc))
                                .addOperand(getLdStRegOp(RtMI))
                                .addOperand(getLdStRegOp(Rt2MI))
                                .addOperand(BaseRegOp)
                                .addImm(OffsetImm);
  (void)MIB;

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

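// Pairwise instructions encode a signed 7-bit scaled immediate, i.e. element
// offsets in [-64, 63]; for 8-byte elements that is byte offsets -512..504.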
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled)
    Offset /= OffsetStride;

  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
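// e.g., alignTo(5, 4) == 8 and alignTo(-3, 4) == 0.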
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  if (!isNarrowLoad(Opc) && !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    bool CanMergeOpc = Opc == MI->getOpcode();
    Flags.setSExtIdx(-1);
    if (!CanMergeOpc) {
      bool IsValidLdStrOpc;
      unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
      assert(IsValidLdStrOpc &&
             "Given Opc should be a Load or Store with an immediate");
      // Opc will be the first instruction in the pair.
      Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
      CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
    }

    if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop
        // looking as something is going on that we don't have enough
        // information to safely transform. Similarly, stop if we see a hint
        // to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;
        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        bool MIIsUnscaled = isUnscaledLdSt(MI);
        bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
        if (!IsNarrowLoad &&
            !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        if (IsNarrowLoad) {
          // If the alignment requirements of the larger type scaled load
          // instruction can't express the scaled offset of the smaller type
          // input, bail and keep looking.
          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        } else {
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between
        // the first and the second alias with the first, we can combine the
        // first into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI->isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI->mayLoadOrStore())
      MemInsns.push_back(MI);
  }
  return E;
}

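// For example, merging a base register increment into a load as a pre-indexed
// form:
//   add x2, x2, #8
//   ldr x0, [x2]
// becomes
//   ldr x0, [x2, #8]!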
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value);
  } else {
    // Paired instruction.
    int Scale = getMemScale(I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I, 0))
              .addOperand(getLdStRegOp(I, 1))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value / Scale);
  }
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

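// For example, for "ldp x0, x1, [x2]" (scale 8) an "add x2, x2, #16"
// qualifies as a matching update: 16 is a multiple of the scale and the
// scaled value 2 fits the signed 7-bit writeback range, so the pair can
// later become "ldp x0, x1, [x2], #16".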
bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
                                               MachineInstr *MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
    // Negate the offset for a SUB instruction.
    Offset *= -1;
    // FALLTHROUGH
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI->getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI->getOperand(0).getReg() != BaseReg ||
        MI->getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI->getOperand(2).getImm();
    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor. The scaled offset must also fit into a signed 7-bit
    // integer, i.e. [-64, 63].
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }

    // If we have a non-zero Offset, we check that it matches the amount
    // we're adding to the register.
    if (!Offset || Offset == MI->getOperand(2).getImm())
      return true;
    break;
  }
  return false;
}

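// Example of the forward scan for a post-index opportunity:
//   ldr x0, [x2]
//   ...
//   add x2, x2, #4
// can be rewritten as "ldr x0, [x2], #4", provided x2 is not used or
// modified by the instructions in between.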
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, unsigned Limit, int UnscaledOffset) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  --MBBI;
  for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

bool AArch64LoadStoreOpt::tryToMergeLdStInst(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = MBBI;
  MachineBasicBlock::iterator E = MI->getParent()->end();
  // If this is a volatile load/store, don't mess with it.
  if (MI->hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (TII->isLdStPairSuppressed(MI))
    return false;

  // Look ahead up to ScanLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
  if (Paired != E) {
    if (isNarrowLoad(MI)) {
      ++NumNarrowLoadsPromoted;
    } else {
      ++NumPairCreated;
      if (isUnscaledLdSt(MI))
        ++NumUnscaledPairCreated;
    }

    // Merge the loads into a pair. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool enableNarrowLdOpt) {
  bool Modified = false;
  // Three transformations to do here:
  // 1) Find narrow loads that can be converted into a single wider load
  //    with bitfield extract instructions.
  //      e.g.,
  //        ldrh w0, [x2]
  //        ldrh w1, [x2, #2]
  //        ; becomes
  //        ldr w0, [x2]
  //        ubfx w1, w0, #16, #16
  //        and w0, w0, #0xffff
  // 2) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  // 3) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4

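  // Transformation 1: promote adjacent narrow loads into a single wider load
  // plus bitfield extracts.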
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       enableNarrowLdOpt && MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRSBWui:
    case AArch64::LDRSHWui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURSBWi:
    case AArch64::LDURSHWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

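  // Transformation 2: merge adjacent loads and stores into paired
  // instructions.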
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRSWui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    case AArch64::LDURSWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

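  // Transformation 3: fold a base-register add/sub update into a nearby
  // load/store as pre- or post-index writeback.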
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switch, though not strictly necessary.
    unsigned Opc = MI->getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::STRHHui:
    case AArch64::STRBBui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRHHui:
    case AArch64::LDRBBui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    // Paired instructions.
    case AArch64::LDPSi:
    case AArch64::LDPSWi:
    case AArch64::LDPDi:
    case AArch64::LDPQi:
    case AArch64::LDPWi:
    case AArch64::LDPXi:
    case AArch64::STPSi:
    case AArch64::STPDi:
    case AArch64::STPQi:
    case AArch64::STPWi:
    case AArch64::STPXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
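      // Three folding opportunities are tried in turn: a post-index form
      // using a following update, a pre-index form using a preceding update,
      // and a pre-index form using a following update that matches the
      // load/store's existing immediate offset.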
      // Look forward to try to form a post-index instruction. For example,
      // ldr x0, [x20]
      // add x20, x20, #32
      //   merged into:
      // ldr x0, [x20], #32
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, ScanLimit, 0);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
        Modified = true;
        ++NumPostFolded;
        break;
      }
      // We don't know how to form the pre-index variants of the unscaled
      // instructions below, so move to the next instruction.
      if (isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      // add x0, x0, #8
      // ldr x1, [x0]
      //   merged into:
      // ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI, ScanLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }
      // The immediate in the load/store is scaled by the size of the memory
      // operation. The immediate in the add we're looking for, however, is
      // not, so adjust here.
      int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
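      // For example, an LDRXui with immediate #8 addresses [base + 64] bytes
      // (getMemScale returns 8 for an X-register load), so the update
      // instruction we look for below must add #64 to the base register.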

      // Look forward to try to find a pre-index instruction. For example,
      // ldr x1, [x0, #64]
      // add x0, x0, #64
      //   merged into:
      // ldr x1, [x0, #64]!
      Update = findMatchingUpdateInsnForward(MBBI, ScanLimit, UnscaledOffset);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

  return Modified;
}

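/// Decide whether promoting narrow loads into a single wider load is likely
/// to be profitable on this subtarget.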
bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
  bool ProfitableArch = Subtarget->isCortexA57();
  // FIXME: The benefit of converting narrow loads into a wider load is
  // likely microarchitecture dependent, as it assumes that a single load
  // with two bitfield extracts is cheaper than two narrow loads. Currently,
  // this conversion is enabled only on Cortex-A57, where the performance
  // benefit was verified.
  return ProfitableArch && !Subtarget->requiresStrictAlign();
}

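/// Pass entry point: cache the subtarget's instruction and register info,
/// then optimize each basic block in the function.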
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();

  bool Modified = false;
  bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, enableNarrowLdOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
// loads and stores near one another?

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}