//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

/// AArch64LoadStoreOpt - Post-register allocation pass to combine
/// load / store instructions to form ldp / stp instructions.
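///
/// For example (illustrative only; registers and offsets are arbitrary):
///   ldr  x0, [x2]
///   ldr  x1, [x2, #8]
/// becomes
///   ldp  x0, x1, [x2]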

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
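  // For example (illustrative), when the store covers the load:
  //   str  w1, [x0, #4]
  //   ldr  w2, [x0, #4]
  // the load is rewritten as
  //   mov  w2, w1
  // and a narrower load is instead rewritten as a bitfield extract of the
  // stored register.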
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
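  // For example (illustrative):
  //   pre-index:  add x2, x2, #8; str x0, [x2]  ==> str x0, [x2, #8]!
  //   post-index: str x0, [x2]; add x2, x2, #8  ==> str x0, [x2], #8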
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Decide whether converting two narrow loads into a single wider load
  // with bitfield extracts should be enabled for this function.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

static bool isNarrowStore(MachineInstr *MI) {
  return isNarrowStore(MI->getOpcode());
}
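
// Narrow stores are merged only when the stored value is WZR; for example
// (illustrative):
//   strh wzr, [x0]
//   strh wzr, [x0, #2]
// becomes
//   str  wzr, [x0]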

static bool isNarrowLoad(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isNarrowLoad(MachineInstr *MI) {
  return isNarrowLoad(MI->getOpcode());
}
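
// Narrow loads are merged into a single wider load plus bitfield extracts;
// for example (illustrative, little-endian; the exact register choice depends
// on the merge direction):
//   ldrh w0, [x2]
//   ldrh w1, [x2, #2]
// becomes roughly
//   ldr  w1, [x2]
//   and  w0, w1, #0xffff
//   lsr  w1, w1, #16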

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}

static bool isMatchingStore(MachineInstr *LoadInst,
                            MachineInstr *StoreInst) {
  unsigned LdOpc = LoadInst->getOpcode();
  unsigned StOpc = StoreInst->getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}

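// Returns true when the bytes read by LoadInst lie entirely within the bytes
// written by StoreInst. For example (illustrative), a 4-byte load at byte
// offset 4 is in range of an 8-byte store at byte offset 0, since 0 <= 4 and
// 4 + 4 <= 0 + 8.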
static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
                                  MachineInstr *StoreInst) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  unsigned NewOpc = getMatchingPairOpcode(Opc);
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that the flags stay compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  int Offset = getLdStOffsetOp(I).getImm();
  int PairedOffset = getLdStOffsetOp(Paired).getImm();
  bool PairedIsUnscaled = isUnscaledLdSt(Paired->getOpcode());

  // We're trying to pair instructions that differ in how they are scaled.
  // If I is scaled then scale the offset of Paired accordingly.
  // Otherwise, do the opposite (i.e., make Paired's offset unscaled).
  if (IsUnscaled != PairedIsUnscaled) {
    int MemSize = getMemScale(Paired);
    assert(!(PairedOffset % getMemScale(Paired)) &&
           "Offset should be a multiple of the stride!");
    PairedOffset =
        PairedIsUnscaled ? PairedOffset / MemSize : PairedOffset * MemSize;
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();

  if (isNarrowLoad(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(NewOpc))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm)
                   .setMemRefs(I->mergeMemRefsWith(*Paired));

    DEBUG(
        dbgs()
        << "Creating the new load and extract. Replacing instructions:\n ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << " ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << " with instructions:\n ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << " ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << " ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  if (isNarrowStore(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(NewOpc))
              .addOperand(getLdStRegOp(I))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm)
              .setMemRefs(I->mergeMemRefsWith(*Paired));
  } else {
    // Scale the immediate offset, if necessary.
    if (isUnscaledLdSt(RtMI->getOpcode())) {
      assert(!(OffsetImm % getMemScale(RtMI)) &&
             "Offset should be a multiple of the stride!");
      OffsetImm /= getMemScale(RtMI);
    }
    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                  TII->get(NewOpc))
              .addOperand(getLdStRegOp(RtMI))
              .addOperand(getLdStRegOp(Rt2MI))
              .addOperand(BaseRegOp)
              .addImm(OffsetImm);
  }

  (void)MIB;

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << " ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << " with instruction:\n ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << " Extend operand:\n ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(LoadI);
  int StoreSize = getMemScale(StoreI);
  unsigned LdRt = getLdStRegOp(LoadI).getReg();
  unsigned StRt = getLdStRegOp(StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if its destination register is the same register as the
    // stored value.
    if (StRt == LdRt && LoadSize == 8) {
      DEBUG(dbgs() << "Remove load instruction:\n ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store have the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = isUnscaledLdSt(LoadI);
    assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(LoadI).getImm()
                               : getLdStOffsetOp(LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(StoreI).getImm()
                               : getLdStOffsetOp(StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0)               // imms
          ;

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  DEBUG(dbgs() << "Promoting load by replacing :\n ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << " ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << " with instructions:\n ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << " ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

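// Paired instructions encode a 7-bit signed, scaled immediate, so the merged
// offset must fall in the range [-64, 63] elements. For example
// (illustrative), two 8-byte loads at byte offsets 512 and 520 cannot be
// paired, because the resulting element offset of 64 is out of range.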
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
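// For example, alignTo(5, 4) == 8 and alignTo(8, 4) == 8.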
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator E = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // FIXME: We miss the case where the matching store is the first instruction
  // in the basic block.
  for (unsigned Count = 0; MBBI != E && Count < Limit;) {
    --MBBI;
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;
    // Now that we know this is a real instruction, count it.
    ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI->mayStore() && isMatchingStore(FirstMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(FirstMI, MI) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI->isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI->mayStore() && mayAlias(FirstMI, MI, TII))
      return false;
  }
  return false;
}

static bool canMergeOpc(unsigned Opc, unsigned PairOpc, LdStPairFlags &Flags) {
  // Opcodes match: nothing more to check.
  if (Opc == PairOpc)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // Opc will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(PairOpc, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
    return true;
  }

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?

  // If the second instruction isn't even a load/store, bail out.
  if (!PairIsValidLdStrOpc)
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return isUnscaledLdSt(Opc) != isUnscaledLdSt(PairOpc) &&
         getMatchingPairOpcode(Opc) == getMatchingPairOpcode(PairOpc);
}
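
// A sign-extending load can also pair with a zero-extending/plain load; for
// example (illustrative):
//   ldrsw x0, [x2]
//   ldr   w1, [x2, #4]
// becomes roughly
//   ldp   w0, w1, [x2]
//   sbfm  x0, x0, #0, #31   // i.e. sxtw x0, w0
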
/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  bool IsNarrowStore = isNarrowStore(Opc);

  // For narrow stores, find only the case where the stored value is WZR.
  if (IsNarrowStore && Reg != AArch64::WZR)
    return E;

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
1148 // range, plus allow an extra one in case we find a later insn that matches
1149 // with Offset-1)
Chad Rosierf11d0402015-10-01 18:17:12 +00001150 int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001151 if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
1152 !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
Tim Northover3b0846e2014-05-24 12:50:23 +00001153 return E;
1154
1155 // Track which registers have been modified and used between the first insn
1156 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001157 ModifiedRegs.reset();
1158 UsedRegs.reset();
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001159
1160 // Remember any instructions that read/write memory between FirstMI and MI.
1161 SmallVector<MachineInstr *, 4> MemInsns;
1162
Tim Northover3b0846e2014-05-24 12:50:23 +00001163 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
1164 MachineInstr *MI = MBBI;
1165 // Skip DBG_VALUE instructions. Otherwise debug info can affect the
1166 // optimization by changing how far we scan.
1167 if (MI->isDebugValue())
1168 continue;
1169
1170 // Now that we know this is a real instruction, count it.
1171 ++Count;
1172
Chad Rosier18896c02016-02-04 16:01:40 +00001173 Flags.setSExtIdx(-1);
Chad Rosier05f80202016-02-04 18:59:49 +00001174 if (canMergeOpc(Opc, MI->getOpcode(), Flags) &&
1175 getLdStOffsetOp(MI).isImm()) {
Chad Rosierc56a9132015-08-10 18:42:45 +00001176 assert(MI->mayLoadOrStore() && "Expected memory operation.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001177 // If we've found another instruction with the same opcode, check to see
1178 // if the base and offset are compatible with our starting instruction.
1179 // These instructions all have scaled immediate operands, so we just
1180 // check for +1/-1. Make sure to check the new instruction offset is
1181 // actually an immediate and not a symbolic reference destined for
1182 // a relocation.
1183 //
1184 // Pairwise instructions have a 7-bit signed offset field. Single insns
1185 // have a 12-bit unsigned offset field. To be a valid combine, the
1186 // final offset must be in range.
Chad Rosierf77e9092015-08-06 15:50:12 +00001187 unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
1188 int MIOffset = getLdStOffsetOp(MI).getImm();
Chad Rosier05f80202016-02-04 18:59:49 +00001189
1190 // We're trying to pair instructions that differ in how they are scaled.
1191 // If FirstMI is scaled then scale the offset of MI accordingly.
1192 // Otherwise, do the opposite (i.e., make MI's offset unscaled).
1193 bool MIIsUnscaled = isUnscaledLdSt(MI);
1194 if (IsUnscaled != MIIsUnscaled) {
1195 int MemSize = getMemScale(MI);
1196 if (MIIsUnscaled) {
1197 // If the unscaled offset isn't a multiple of the MemSize, we can't
1198 // pair the operations together: bail and keep looking.
1199 if (MIOffset % MemSize)
1200 continue;
1201 MIOffset /= MemSize;
1202 } else {
1203 MIOffset *= MemSize;
1204 }
1205 }
1206
Tim Northover3b0846e2014-05-24 12:50:23 +00001207 if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
1208 (Offset + OffsetStride == MIOffset))) {
1209 int MinOffset = Offset < MIOffset ? Offset : MIOffset;
1210 // If this is a volatile load/store that otherwise matched, stop looking
1211 // as something is going on that we don't have enough information to
1212 // safely transform. Similarly, stop if we see a hint to avoid pairs.
1213 if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
1214 return E;
1215 // If the resultant immediate offset of merging these instructions
1216 // is out of range for a pairwise instruction, bail and keep looking.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001217 bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
1218 if (!IsNarrowLoad &&
Chad Rosier05f80202016-02-04 18:59:49 +00001219 !inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001220 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
Chad Rosierc56a9132015-08-10 18:42:45 +00001221 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001222 continue;
1223 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001224
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001225 if (IsNarrowLoad || IsNarrowStore) {
1226 // If the alignment requirements of the scaled wide load/store
1227 // instruction can't express the offset of the scaled narrow
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001228 // input, bail and keep looking.
1229 if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
1230 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1231 MemInsns.push_back(MI);
1232 continue;
1233 }
1234 } else {
1235 // If the alignment requirements of the paired (scaled) instruction
1236 // can't express the offset of the unscaled input, bail and keep
1237 // looking.
1238 if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
1239 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1240 MemInsns.push_back(MI);
1241 continue;
1242 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001243 }
1244 // If the destination register of the loads is the same register, bail
1245 // and keep looking. A load-pair instruction with both destination
1246 // registers the same is UNPREDICTABLE and will result in an exception.
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001247 // For narrow stores, allow only when the stored value is the same
1248 // (i.e., WZR).
1249 if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
1250 (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001251 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
Chad Rosierc56a9132015-08-10 18:42:45 +00001252 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001253 continue;
1254 }
1255
1256 // If the Rt of the second instruction was not modified or used between
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001257 // the two instructions and none of the instructions between the second
1258 // and first alias with the second, we can combine the second into the
1259 // first.
Chad Rosierf77e9092015-08-06 15:50:12 +00001260 if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
1261 !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001262 !mayAlias(MI, MemInsns, TII)) {
Chad Rosier96a18a92015-07-21 17:42:04 +00001263 Flags.setMergeForward(false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001264 return MBBI;
1265 }
1266
1267 // Likewise, if the Rt of the first instruction is not modified or used
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001268 // between the two instructions and none of the instructions between the
1269 // first and the second alias with the first, we can combine the first
1270 // into the second.
Chad Rosierf77e9092015-08-06 15:50:12 +00001271 if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
Chad Rosier5f668e12015-09-03 14:19:43 +00001272 !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001273 !mayAlias(FirstMI, MemInsns, TII)) {
Chad Rosier96a18a92015-07-21 17:42:04 +00001274 Flags.setMergeForward(true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001275 return MBBI;
1276 }
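      // For instance (illustrative):
      //   ldr x0, [x2]
      //   str x3, [x4]      // may alias [x2, #8]
      //   ldr x1, [x2, #8]
      // Moving the second load up past the possibly-aliasing store is not
      // safe, but merging forward (moving the first load down) may still be.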
1277 // Unable to combine these instructions due to interference in between.
1278 // Keep looking.
1279 }
1280 }
1281
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001282  // The instruction wasn't a matching load or store. Stop searching if we
      1283  // encounter a call instruction that might modify memory.
1284 if (MI->isCall())
Tim Northover3b0846e2014-05-24 12:50:23 +00001285 return E;
1286
1287 // Update modified / uses register lists.
1288 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1289
1290 // Otherwise, if the base register is modified, we have no match, so
1291 // return early.
1292 if (ModifiedRegs[BaseReg])
1293 return E;
Chad Rosierce8e5ab2015-05-21 21:36:46 +00001294
1295 // Update list of instructions that read/write memory.
1296 if (MI->mayLoadOrStore())
1297 MemInsns.push_back(MI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001298 }
1299 return E;
1300}
1301
1302MachineBasicBlock::iterator
Chad Rosier2dfd3542015-09-23 13:51:44 +00001303AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1304 MachineBasicBlock::iterator Update,
1305 bool IsPreIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001306 assert((Update->getOpcode() == AArch64::ADDXri ||
1307 Update->getOpcode() == AArch64::SUBXri) &&
1308 "Unexpected base register update instruction to merge!");
1309 MachineBasicBlock::iterator NextI = I;
      1310  // Return the instruction following the merged instruction, which is
      1311  // the instruction following our unmerged load, unless that's the add/sub
      1312  // instruction we're merging, in which case it's the one after that.
1313 if (++NextI == Update)
1314 ++NextI;
1315
1316 int Value = Update->getOperand(2).getImm();
1317 assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
Chad Rosier2dfd3542015-09-23 13:51:44 +00001318 "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
Tim Northover3b0846e2014-05-24 12:50:23 +00001319 if (Update->getOpcode() == AArch64::SUBXri)
1320 Value = -Value;
1321
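  // For example (illustrative): an update of sub x2, x2, #16 contributes
  // Value = -16 and yields a negative writeback immediate such as
  // ldr x0, [x2], #-16.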
Chad Rosier2dfd3542015-09-23 13:51:44 +00001322 unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1323 : getPostIndexedOpcode(I->getOpcode());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001324 MachineInstrBuilder MIB;
1325 if (!isPairedLdSt(I)) {
1326 // Non-paired instruction.
1327 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1328 .addOperand(getLdStRegOp(Update))
1329 .addOperand(getLdStRegOp(I))
1330 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001331 .addImm(Value)
1332 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001333 } else {
1334 // Paired instruction.
Chad Rosier32d4d372015-09-29 16:07:32 +00001335 int Scale = getMemScale(I);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001336 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1337 .addOperand(getLdStRegOp(Update))
1338 .addOperand(getLdStRegOp(I, 0))
1339 .addOperand(getLdStRegOp(I, 1))
1340 .addOperand(getLdStBaseOp(I))
Chad Rosier3ada75f2016-01-28 15:38:24 +00001341 .addImm(Value / Scale)
1342 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001343 }
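  // For instance (illustrative): folding add x2, x2, #32 into ldp x0, x1, [x2]
  // (scale 8) produces a writeback ldp whose immediate operand is 32 / 8 = 4.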
Tim Northover3b0846e2014-05-24 12:50:23 +00001344 (void)MIB;
1345
Chad Rosier2dfd3542015-09-23 13:51:44 +00001346 if (IsPreIdx)
1347 DEBUG(dbgs() << "Creating pre-indexed load/store.");
1348 else
1349 DEBUG(dbgs() << "Creating post-indexed load/store.");
Tim Northover3b0846e2014-05-24 12:50:23 +00001350 DEBUG(dbgs() << " Replacing instructions:\n ");
1351 DEBUG(I->print(dbgs()));
1352 DEBUG(dbgs() << " ");
1353 DEBUG(Update->print(dbgs()));
1354 DEBUG(dbgs() << " with instruction:\n ");
1355 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1356 DEBUG(dbgs() << "\n");
1357
1358 // Erase the old instructions for the block.
1359 I->eraseFromParent();
1360 Update->eraseFromParent();
1361
1362 return NextI;
1363}
1364
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001365bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
1366 MachineInstr *MI,
1367 unsigned BaseReg, int Offset) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001368 switch (MI->getOpcode()) {
1369 default:
1370 break;
1371 case AArch64::SUBXri:
1372 // Negate the offset for a SUB instruction.
1373 Offset *= -1;
1374 // FALLTHROUGH
1375 case AArch64::ADDXri:
1376 // Make sure it's a vanilla immediate operand, not a relocation or
1377 // anything else we can't handle.
1378 if (!MI->getOperand(2).isImm())
1379 break;
1380 // Watch out for 1 << 12 shifted value.
1381 if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
1382 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001383
1384 // The update instruction source and destination register must be the
1385 // same as the load/store base register.
1386 if (MI->getOperand(0).getReg() != BaseReg ||
1387 MI->getOperand(1).getReg() != BaseReg)
1388 break;
1389
1390 bool IsPairedInsn = isPairedLdSt(MemMI);
1391 int UpdateOffset = MI->getOperand(2).getImm();
1392 // For non-paired load/store instructions, the immediate must fit in a
1393 // signed 9-bit integer.
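    // e.g. (illustrative): add x0, x0, #256 cannot be folded, since 256 does
    // not fit the signed 9-bit range [-256, 255].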
1394 if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
1395 break;
1396
1397 // For paired load/store instructions, the immediate must be a multiple of
1398 // the scaling factor. The scaled offset must also fit into a signed 7-bit
1399 // integer.
1400 if (IsPairedInsn) {
Chad Rosier32d4d372015-09-29 16:07:32 +00001401 int Scale = getMemScale(MemMI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001402 if (UpdateOffset % Scale != 0)
1403 break;
1404
1405 int ScaledOffset = UpdateOffset / Scale;
      1406        if (ScaledOffset > 63 || ScaledOffset < -64)
1407 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001408 }
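    // For example (illustrative, 64-bit ldp with scale 8): add x0, x0, #24
    // remains a candidate (24 / 8 = 3 fits in the scaled field), whereas
    // add x0, x0, #4 does not, since 4 is not a multiple of 8.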
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001409
      1410      // If we have a non-zero Offset, check that it matches the amount
      1411      // we're adding to the register; a zero Offset accepts any update amount.
1412 if (!Offset || Offset == MI->getOperand(2).getImm())
1413 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001414 break;
1415 }
1416 return false;
1417}
1418
1419MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001420 MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001421 MachineBasicBlock::iterator E = I->getParent()->end();
1422 MachineInstr *MemMI = I;
1423 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001424
Chad Rosierf77e9092015-08-06 15:50:12 +00001425 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001426 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001427
Chad Rosierb7c5b912015-10-01 13:43:05 +00001428 // Scan forward looking for post-index opportunities. Updating instructions
1429 // can't be formed if the memory instruction doesn't have the offset we're
1430 // looking for.
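  // For example (illustrative): when called with UnscaledOffset == 0 to form a
  // plain post-index update, a load such as ldr x0, [x2, #8] is rejected here.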
1431 if (MIUnscaledOffset != UnscaledOffset)
1432 return E;
1433
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001434 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001435 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001436 bool IsPairedInsn = isPairedLdSt(MemMI);
1437 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1438 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1439 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1440 return E;
1441 }
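  // e.g. (illustrative): ldr x0, [x0] cannot be combined with a later
  // add x0, x0, #8, because the writeback form ldr x0, [x0], #8 would have
  // Rt == Rn, which is UNPREDICTABLE.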
Tim Northover3b0846e2014-05-24 12:50:23 +00001442
Tim Northover3b0846e2014-05-24 12:50:23 +00001443 // Track which registers have been modified and used between the first insn
1444 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001445 ModifiedRegs.reset();
1446 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001447 ++MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001448 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001449 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001450 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001451 if (MI->isDebugValue())
1452 continue;
1453
Chad Rosier35706ad2016-02-04 21:26:02 +00001454 // Now that we know this is a real instruction, count it.
1455 ++Count;
1456
Tim Northover3b0846e2014-05-24 12:50:23 +00001457 // If we found a match, return it.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001458 if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001459 return MBBI;
1460
1461 // Update the status of what the instruction clobbered and used.
1462 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1463
1464 // Otherwise, if the base register is used or modified, we have no match, so
1465 // return early.
1466 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1467 return E;
1468 }
1469 return E;
1470}
1471
1472MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
Chad Rosier35706ad2016-02-04 21:26:02 +00001473 MachineBasicBlock::iterator I, unsigned Limit) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001474 MachineBasicBlock::iterator B = I->getParent()->begin();
1475 MachineBasicBlock::iterator E = I->getParent()->end();
1476 MachineInstr *MemMI = I;
1477 MachineBasicBlock::iterator MBBI = I;
Tim Northover3b0846e2014-05-24 12:50:23 +00001478
Chad Rosierf77e9092015-08-06 15:50:12 +00001479 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1480 int Offset = getLdStOffsetOp(MemMI).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001481
1482 // If the load/store is the first instruction in the block, there's obviously
1483 // not any matching update. Ditto if the memory offset isn't zero.
1484 if (MBBI == B || Offset != 0)
1485 return E;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001486 // If the base register overlaps a destination register, we can't
Tim Northover3b0846e2014-05-24 12:50:23 +00001487 // merge the update.
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001488 bool IsPairedInsn = isPairedLdSt(MemMI);
1489 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1490 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1491 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1492 return E;
1493 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001494
1495 // Track which registers have been modified and used between the first insn
1496 // (inclusive) and the second insn.
Chad Rosierbba881e2016-02-02 15:02:30 +00001497 ModifiedRegs.reset();
1498 UsedRegs.reset();
Tim Northover3b0846e2014-05-24 12:50:23 +00001499 --MBBI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001500 for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001501 MachineInstr *MI = MBBI;
Chad Rosierb11c82d2016-01-19 21:27:05 +00001502 // Skip DBG_VALUE instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001503 if (MI->isDebugValue())
1504 continue;
1505
Chad Rosier35706ad2016-02-04 21:26:02 +00001506 // Now that we know this is a real instruction, count it.
1507 ++Count;
1508
Tim Northover3b0846e2014-05-24 12:50:23 +00001509 // If we found a match, return it.
Chad Rosier11c825f2015-09-30 19:44:40 +00001510 if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
Tim Northover3b0846e2014-05-24 12:50:23 +00001511 return MBBI;
1512
1513 // Update the status of what the instruction clobbered and used.
1514 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1515
1516 // Otherwise, if the base register is used or modified, we have no match, so
1517 // return early.
1518 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1519 return E;
1520 }
1521 return E;
1522}
1523
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001524bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
1525 MachineBasicBlock::iterator &MBBI) {
1526 MachineInstr *MI = MBBI;
1527 // If this is a volatile load, don't mess with it.
1528 if (MI->hasOrderedMemoryRef())
1529 return false;
1530
1531 // Make sure this is a reg+imm.
1532 // FIXME: It is possible to extend it to handle reg+reg cases.
1533 if (!getLdStOffsetOp(MI).isImm())
1534 return false;
1535
Chad Rosier35706ad2016-02-04 21:26:02 +00001536 // Look backward up to LdStLimit instructions.
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001537 MachineBasicBlock::iterator StoreI;
Chad Rosier35706ad2016-02-04 21:26:02 +00001538 if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001539 ++NumLoadsFromStoresPromoted;
1540 // Promote the load. Keeping the iterator straight is a
1541 // pain, so we let the merge routine tell us what the next instruction
1542 // is after it's done mucking about.
1543 MBBI = promoteLoadFromStore(MBBI, StoreI);
1544 return true;
1545 }
1546 return false;
1547}
1548
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001549bool AArch64LoadStoreOpt::tryToMergeLdStInst(
1550 MachineBasicBlock::iterator &MBBI) {
1551 MachineInstr *MI = MBBI;
1552 MachineBasicBlock::iterator E = MI->getParent()->end();
1553 // If this is a volatile load/store, don't mess with it.
1554 if (MI->hasOrderedMemoryRef())
1555 return false;
1556
1557 // Make sure this is a reg+imm (as opposed to an address reloc).
1558 if (!getLdStOffsetOp(MI).isImm())
1559 return false;
1560
1561 // Check if this load/store has a hint to avoid pair formation.
1562 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1563 if (TII->isLdStPairSuppressed(MI))
1564 return false;
1565
Chad Rosier35706ad2016-02-04 21:26:02 +00001566 // Look ahead up to LdStLimit instructions for a pairable instruction.
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001567 LdStPairFlags Flags;
Chad Rosier35706ad2016-02-04 21:26:02 +00001568 MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001569 if (Paired != E) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001570 if (isNarrowLoad(MI)) {
1571 ++NumNarrowLoadsPromoted;
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001572 } else if (isNarrowStore(MI)) {
1573 ++NumZeroStoresPromoted;
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001574 } else {
1575 ++NumPairCreated;
1576 if (isUnscaledLdSt(MI))
1577 ++NumUnscaledPairCreated;
1578 }
1579
1580 // Merge the loads into a pair. Keeping the iterator straight is a
1581 // pain, so we let the merge routine tell us what the next instruction
1582 // is after it's done mucking about.
1583 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1584 return true;
1585 }
1586 return false;
1587}
1588
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001589bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
1590 bool enableNarrowLdOpt) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001591 bool Modified = false;
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001592  // Four transformations to do here:
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001593 // 1) Find loads that directly read from stores and promote them by
1594 // replacing with mov instructions. If the store is wider than the load,
1595 // the load will be replaced with a bitfield extract.
1596 // e.g.,
1597 // str w1, [x0, #4]
1598 // ldrh w2, [x0, #6]
1599 // ; becomes
1600 // str w1, [x0, #4]
1601 // lsr w2, w1, #16
Tim Northover3b0846e2014-05-24 12:50:23 +00001602 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001603 MBBI != E;) {
1604 MachineInstr *MI = MBBI;
1605 switch (MI->getOpcode()) {
1606 default:
1607 // Just move on to the next instruction.
1608 ++MBBI;
1609 break;
1610 // Scaled instructions.
1611 case AArch64::LDRBBui:
1612 case AArch64::LDRHHui:
1613 case AArch64::LDRWui:
1614 case AArch64::LDRXui:
1615 // Unscaled instructions.
1616 case AArch64::LDURBBi:
1617 case AArch64::LDURHHi:
1618 case AArch64::LDURWi:
1619 case AArch64::LDURXi: {
1620 if (tryToPromoteLoadFromStore(MBBI)) {
1621 Modified = true;
1622 break;
1623 }
1624 ++MBBI;
1625 break;
1626 }
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001627 }
1628 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001629 // 2) Find narrow loads that can be converted into a single wider load
1630 // with bitfield extract instructions.
1631 // e.g.,
1632 // ldrh w0, [x2]
1633 // ldrh w1, [x2, #2]
1634 // ; becomes
1635 // ldr w0, [x2]
1636 // ubfx w1, w0, #16, #16
1637 // and w0, w0, #ffff
Jun Bum Lim6755c3b2015-12-22 16:36:16 +00001638 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001639 enableNarrowLdOpt && MBBI != E;) {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001640 MachineInstr *MI = MBBI;
1641 switch (MI->getOpcode()) {
1642 default:
1643 // Just move on to the next instruction.
1644 ++MBBI;
1645 break;
1646 // Scaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001647 case AArch64::LDRBBui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001648 case AArch64::LDRHHui:
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001649 case AArch64::LDRSBWui:
1650 case AArch64::LDRSHWui:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001651 case AArch64::STRBBui:
1652 case AArch64::STRHHui:
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001653 // Unscaled instructions.
Jun Bum Lim4c35cca2015-11-19 17:21:41 +00001654 case AArch64::LDURBBi:
1655 case AArch64::LDURHHi:
1656 case AArch64::LDURSBWi:
Jun Bum Lim80ec0d32015-11-20 21:14:07 +00001657 case AArch64::LDURSHWi:
1658 case AArch64::STURBBi:
1659 case AArch64::STURHHi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001660 if (tryToMergeLdStInst(MBBI)) {
1661 Modified = true;
1662 break;
1663 }
1664 ++MBBI;
1665 break;
1666 }
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001667 }
1668 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001669 // 3) Find loads and stores that can be merged into a single load or store
1670 // pair instruction.
1671 // e.g.,
1672 // ldr x0, [x2]
1673 // ldr x1, [x2, #8]
1674 // ; becomes
1675 // ldp x0, x1, [x2]
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001676 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
Tim Northover3b0846e2014-05-24 12:50:23 +00001677 MBBI != E;) {
1678 MachineInstr *MI = MBBI;
1679 switch (MI->getOpcode()) {
1680 default:
1681 // Just move on to the next instruction.
1682 ++MBBI;
1683 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001684 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001685 case AArch64::STRSui:
1686 case AArch64::STRDui:
1687 case AArch64::STRQui:
1688 case AArch64::STRXui:
1689 case AArch64::STRWui:
1690 case AArch64::LDRSui:
1691 case AArch64::LDRDui:
1692 case AArch64::LDRQui:
1693 case AArch64::LDRXui:
1694 case AArch64::LDRWui:
Quentin Colombet29f55332015-01-24 01:25:54 +00001695 case AArch64::LDRSWui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001696 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001697 case AArch64::STURSi:
1698 case AArch64::STURDi:
1699 case AArch64::STURQi:
1700 case AArch64::STURWi:
1701 case AArch64::STURXi:
1702 case AArch64::LDURSi:
1703 case AArch64::LDURDi:
1704 case AArch64::LDURQi:
1705 case AArch64::LDURWi:
Quentin Colombet29f55332015-01-24 01:25:54 +00001706 case AArch64::LDURXi:
1707 case AArch64::LDURSWi: {
Jun Bum Limc9879ec2015-10-27 19:16:03 +00001708 if (tryToMergeLdStInst(MBBI)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001709 Modified = true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001710 break;
1711 }
1712 ++MBBI;
1713 break;
1714 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001715 }
1716 }
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001717 // 4) Find base register updates that can be merged into the load or store
1718 // as a base-reg writeback.
1719 // e.g.,
1720 // ldr x0, [x2]
1721 // add x2, x2, #4
1722 // ; becomes
1723 // ldr x0, [x2], #4
Tim Northover3b0846e2014-05-24 12:50:23 +00001724 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1725 MBBI != E;) {
1726 MachineInstr *MI = MBBI;
1727 // Do update merging. It's simpler to keep this separate from the above
Chad Rosierdbdb1d62016-02-01 21:38:31 +00001728    // switches, though not strictly necessary.
Matthias Braunfa3872e2015-05-18 20:27:55 +00001729 unsigned Opc = MI->getOpcode();
Tim Northover3b0846e2014-05-24 12:50:23 +00001730 switch (Opc) {
1731 default:
1732 // Just move on to the next instruction.
1733 ++MBBI;
1734 break;
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001735 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001736 case AArch64::STRSui:
1737 case AArch64::STRDui:
1738 case AArch64::STRQui:
1739 case AArch64::STRXui:
1740 case AArch64::STRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001741 case AArch64::STRHHui:
1742 case AArch64::STRBBui:
Tim Northover3b0846e2014-05-24 12:50:23 +00001743 case AArch64::LDRSui:
1744 case AArch64::LDRDui:
1745 case AArch64::LDRQui:
1746 case AArch64::LDRXui:
1747 case AArch64::LDRWui:
Chad Rosierdabe2532015-09-29 18:26:15 +00001748 case AArch64::LDRHHui:
1749 case AArch64::LDRBBui:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001750 // Unscaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001751 case AArch64::STURSi:
1752 case AArch64::STURDi:
1753 case AArch64::STURQi:
1754 case AArch64::STURWi:
1755 case AArch64::STURXi:
1756 case AArch64::LDURSi:
1757 case AArch64::LDURDi:
1758 case AArch64::LDURQi:
1759 case AArch64::LDURWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001760 case AArch64::LDURXi:
1761 // Paired instructions.
1762 case AArch64::LDPSi:
Chad Rosier43150122015-09-29 20:39:55 +00001763 case AArch64::LDPSWi:
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001764 case AArch64::LDPDi:
1765 case AArch64::LDPQi:
1766 case AArch64::LDPWi:
1767 case AArch64::LDPXi:
1768 case AArch64::STPSi:
1769 case AArch64::STPDi:
1770 case AArch64::STPQi:
1771 case AArch64::STPWi:
1772 case AArch64::STPXi: {
Tim Northover3b0846e2014-05-24 12:50:23 +00001773 // Make sure this is a reg+imm (as opposed to an address reloc).
Chad Rosierf77e9092015-08-06 15:50:12 +00001774 if (!getLdStOffsetOp(MI).isImm()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001775 ++MBBI;
1776 break;
1777 }
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001778 // Look forward to try to form a post-index instruction. For example,
1779 // ldr x0, [x20]
1780 // add x20, x20, #32
1781 // merged into:
1782 // ldr x0, [x20], #32
Tim Northover3b0846e2014-05-24 12:50:23 +00001783 MachineBasicBlock::iterator Update =
Chad Rosier35706ad2016-02-04 21:26:02 +00001784 findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001785 if (Update != E) {
1786 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001787 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001788 Modified = true;
1789 ++NumPostFolded;
1790 break;
1791 }
      1792      // Don't know how to handle unscaled pre/post-index versions, so move to
      1793      // the next instruction.
Chad Rosier22eb7102015-08-06 17:37:18 +00001794 if (isUnscaledLdSt(Opc)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001795 ++MBBI;
1796 break;
1797 }
1798
1799 // Look back to try to find a pre-index instruction. For example,
1800 // add x0, x0, #8
1801 // ldr x1, [x0]
1802 // merged into:
1803 // ldr x1, [x0, #8]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001804 Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001805 if (Update != E) {
1806 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001807 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001808 Modified = true;
1809 ++NumPreFolded;
1810 break;
1811 }
Chad Rosier7a83d772015-10-01 13:09:44 +00001812 // The immediate in the load/store is scaled by the size of the memory
1813 // operation. The immediate in the add we're looking for,
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001814 // however, is not, so adjust here.
Chad Rosier0b15e7c2015-10-01 13:33:31 +00001815 int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
Chad Rosier1bbd7fb2015-09-25 17:48:17 +00001816
Tim Northover3b0846e2014-05-24 12:50:23 +00001817      // Look forward to try to find a pre-index instruction. For example,
1818 // ldr x1, [x0, #64]
1819 // add x0, x0, #64
1820 // merged into:
1821 // ldr x1, [x0, #64]!
Chad Rosier35706ad2016-02-04 21:26:02 +00001822 Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
Tim Northover3b0846e2014-05-24 12:50:23 +00001823 if (Update != E) {
1824 // Merge the update into the ld/st.
Chad Rosier2dfd3542015-09-23 13:51:44 +00001825 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
Tim Northover3b0846e2014-05-24 12:50:23 +00001826 Modified = true;
1827 ++NumPreFolded;
1828 break;
1829 }
1830
1831 // Nothing found. Just move to the next instruction.
1832 ++MBBI;
1833 break;
1834 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001835 }
1836 }
1837
1838 return Modified;
1839}
1840
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001841bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
Jun Bum Limc12c2792015-11-19 18:41:27 +00001842 bool ProfitableArch = Subtarget->isCortexA57();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001843 // FIXME: The benefit from converting narrow loads into a wider load could be
1844 // microarchitectural as it assumes that a single load with two bitfield
1845 // extracts is cheaper than two narrow loads. Currently, this conversion is
1846 // enabled only in cortex-a57 on which performance benefits were verified.
Jun Bum Limc12c2792015-11-19 18:41:27 +00001847 return ProfitableArch && !Subtarget->requiresStrictAlign();
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001848}
1849
Tim Northover3b0846e2014-05-24 12:50:23 +00001850bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
Oliver Stannardd414c992015-11-10 11:04:18 +00001851 Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
1852 TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
1853 TRI = Subtarget->getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +00001854
Chad Rosierbba881e2016-02-02 15:02:30 +00001855 // Resize the modified and used register bitfield trackers. We do this once
1856 // per function and then clear the bitfield each time we optimize a load or
1857 // store.
1858 ModifiedRegs.resize(TRI->getNumRegs());
1859 UsedRegs.resize(TRI->getNumRegs());
1860
Tim Northover3b0846e2014-05-24 12:50:23 +00001861 bool Modified = false;
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001862 bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
Tim Northover3b0846e2014-05-24 12:50:23 +00001863 for (auto &MBB : Fn)
Jun Bum Lim22fe15e2015-11-06 16:27:47 +00001864 Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001865
1866 return Modified;
1867}
1868
1869// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
1870// loads and stores near one another?
1871
Chad Rosier43f5c842015-08-05 12:40:13 +00001872/// createAArch64LoadStoreOptimizationPass - returns an instance of the
1873/// load / store optimization pass.
Tim Northover3b0846e2014-05-24 12:50:23 +00001874FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
1875 return new AArch64LoadStoreOpt();
1876}