//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include <algorithm>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;

static cl::opt<unsigned>
TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
                    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// getInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI.getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  // before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    NumBytes = 0;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes
    NumBytes = 16;
    break;
  }

  return NumBytes;
}

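// parseCondBranch decodes a conditional branch terminator into a target block
// and a Cond vector. A sketch of the encoding used throughout this file,
// derived from the cases below:
//   Bcc:          Cond = { CC }
//   CB[N]Z[WX]:   Cond = { -1, Opcode, Reg }
//   TB[N]Z[WX]:   Cond = { -1, Opcode, Reg, BitNum }
// The leading -1 immediate marks a folded compare-and-branch, which is how
// reverseBranchCondition and instantiateCondBranch below tell the two forms
// apart.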
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
  return isIntN(Bits, BrOffset / 4);
}
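
// For example, with the default aarch64-tbz-offset-bits=14 a TB[N]Z branch
// spans a signed 14-bit offset counted in 4-byte instructions, i.e. roughly
// +/-32KiB around the branch.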

MachineBasicBlock *AArch64InstrInfo::getBranchDestBlock(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
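
// To illustrate the contract implemented above (the usual
// TargetInstrInfo::analyzeBranch semantics): for a block ending in
//   b.eq %bb.1
//   b    %bb.2
// this returns false (analyzable) with TBB = %bb.1, FBB = %bb.2 and
// Cond = { EQ }; for a lone fall-through conditional branch, FBB is left
// null. Returning true means "don't touch this block".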

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
    LLVM_FALLTHROUGH;
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
    LLVM_FALLTHROUGH;
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
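
// An illustrative fold (a sketch; the virtual register numbers are made up):
//   %2 = ADDWri %1, 1, 0        ; %2 = %1 + 1
//   %3 = CSELWr %2, %0, eq      ; %3 = eq ? %2 : %0
// canFoldIntoCSel returns CSINCWr with NewVReg = %1, and insertSelect below
// emits
//   %3 = CSINCWr %0, %1, ne     ; %3 = ne ? %0 : %1 + 1
// (the condition is inverted and the operands swapped), which computes the
// same value without the separate ADD.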

bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}

/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}
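
// For example, "MOVi32imm #0x00ff00ff" can be expanded to the single
// instruction "orr wN, wzr, #0x00ff00ff", because 0x00ff00ff is a valid
// logical immediate (a repeating 16-bit element with 8 contiguous set bits),
// so the pseudo really is as cheap as a move.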

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  unsigned Imm;

  switch (MI.getOpcode()) {
  default:
    return false;

  // add/sub with immediate, without a shift on the immediate
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
            MI.getOperand(3).getImm() == 0);

  // add/sub on register with shift
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getArithShiftValue(Imm) < 4);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // logical ops on register with shift
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getShiftValue(Imm) < 4 &&
            AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);

  // It is cheap to zero out registers if the subtarget has ZeroCycleZeroing
  // feature.
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    return Subtarget.hasZeroCycleZeroing();
  case TargetOpcode::COPY:
    return (Subtarget.hasZeroCycleZeroing() &&
            (MI.getOperand(1).getReg() == AArch64::WZR ||
             MI.getOperand(1).getReg() == AArch64::XZR));
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
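
// For example, "ldr x0, [x1, #8]" (offset 8, width 8) and
// "str x2, [x1, #16]" (offset 16, width 8) share the base register x1 and
// satisfy 8 + 8 <= 16, so the two accesses are provably disjoint.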

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1, we only record whether
    // the immediate is zero or non-zero here.
    CmpValue = MI.getOperand(2).getImm() != 0;
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk
    // CmpValue is only used to compare with zero in OptimizeCompareInstr
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
    return true;
  }

  return false;
}
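
// For example, "cmp w8, #0" is really "SUBSWri %vreg, 0" here; it is analyzed
// as SrcReg = w8, SrcReg2 = 0, CmpMask = ~0, CmpValue = 0, which is exactly
// the shape optimizeCompareInstr below knows how to fold away.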

static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}
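
// A concrete illustration of the MIDefinesZeroReg check above: in the
// non-flag-setting ADD/SUB encodings, destination register number 31 means
// SP/WSP rather than XZR/WZR. So "subs wzr, w0, #1" must keep its S form,
// since "sub wzr, w0, #1" would actually encode "sub wsp, w0, #1".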

enum AccessKind {
  AK_Write = 0x01,
  AK_Read = 0x10,
  AK_All = 0x11
};

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed CC are accessed
/// on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block.
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
                      [From](MachineInstr &MI) {
                        return MI.getIterator() == From;
                      }) != To->getParent()->rend());

  // We iterate backward starting \p To until we hit \p From.
  for (--To; To != From; --To) {
    const MachineInstr &Instr = *To;

    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It can be truly a compare
/// instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
///    condition code or an instruction which can be converted into such an
///    instruction. Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare
  // function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}

/// Get opcode of S version of Instr.
/// If Instr is S version its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version
/// or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr: return AArch64::ADDSWrr;
  case AArch64::ADDWri: return AArch64::ADDSWri;
  case AArch64::ADDXrr: return AArch64::ADDSXrr;
  case AArch64::ADDXri: return AArch64::ADDSXri;
  case AArch64::ADCWr: return AArch64::ADCSWr;
  case AArch64::ADCXr: return AArch64::ADCSXr;
  case AArch64::SUBWrr: return AArch64::SUBSWrr;
  case AArch64::SUBWri: return AArch64::SUBSWri;
  case AArch64::SUBXrr: return AArch64::SUBSXrr;
  case AArch64::SUBXri: return AArch64::SUBSXri;
  case AArch64::SBCWr: return AArch64::SBCSWr;
  case AArch64::SBCXr: return AArch64::SBCSXr;
  case AArch64::ANDWri: return AArch64::ANDSWri;
  case AArch64::ANDXri: return AArch64::ANDSXri;
  }
}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}

namespace {
struct UsedNZCV {
  bool N;
  bool Z;
  bool C;
  bool V;
  UsedNZCV() : N(false), Z(false), C(false), V(false) {}
  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};
} // end anonymous namespace

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64CC::Invalid;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
  }
  }
}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set or C clear
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH; // HI/LS also depend on C, like HS/LO below.
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set, N and V differ
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH; // GT/LE also depend on N and V, like GE/LT below.
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}

static bool isADDSRegImm(unsigned Opcode) {
  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}

static bool isSUBSRegImm(unsigned Opcode) {
  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}

/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///        MI and CmpInstr
///        or if MI opcode is not the S form there must be neither defs of flags
///        nor uses of flags between MI and CmpInstr.
/// - and  C/V flags are not used after CmpInstr
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
                                       const TargetRegisterInfo *TRI) {
  assert(MI);
  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
  assert(CmpInstr);

  const unsigned CmpOpcode = CmpInstr->getOpcode();
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

  if (MI->getParent() != CmpInstr->getParent())
    return false;

  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
    return false;

  AccessKind AccessToCheck = AK_Write;
  if (sForm(*MI) != MI->getOpcode())
    AccessToCheck = AK_All;
  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
    return false;

  UsedNZCV NZCVUsedAfterCmp;
  for (auto I = std::next(CmpInstr->getIterator()),
            E = CmpInstr->getParent()->instr_end();
       I != E; ++I) {
    const MachineInstr &Instr = *I;
    if (Instr.readsRegister(AArch64::NZCV, TRI)) {
      AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
      if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
        return false;
      NZCVUsedAfterCmp |= getUsedNZCV(CC);
    }

    if (Instr.modifiesRegister(AArch64::NZCV, TRI))
      break;
  }

  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}
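
// A sketch of the rewrite this check guards (performed by substituteCmpToZero
// below):
//   %3 = SUBWrr %1, %2
//   ...                      ; nothing touching NZCV in between
//   SUBSWri %3, 0            ; i.e. cmp %3, #0
//   b.eq ...
// becomes
//   %3 = SUBSWrr %1, %2      ; flags now set by the SUB itself
//   b.eq ...
// This is only sound for the N and Z flags: the C and V results of
// "subs %1, %2" generally differ from those of "cmp %3, #0", hence the
// !C && !V requirement computed above.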

/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo *MRI) const {
  assert(MRI);
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  unsigned NewOpc = sForm(*MI);
  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
    return false;

  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
    return false;

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr.eraseFromParent();
  bool succeeded = UpdateOperandRegClass(*MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI.memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI.memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}
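
// In the simplest (non-GOT, small code model) case above, the expansion is a
// page-relative load; schematically, with the guard symbol typically being
// __stack_chk_guard on ELF:
//   adrp x8, __stack_chk_guard
//   ldr  x8, [x8, :lo12:__stack_chk_guard]
// The original MachineMemOperand is carried over so later passes still see
// this as the stack-guard load.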

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI.getOperand(3).isImm()) {
      unsigned val = MI.getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction's extended-register operand has a non-zero
/// extend/shift immediate.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI.getOperand(3).isImm()) {
      unsigned val = MI.getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 3 &&
             MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI.getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI.getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI.getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI.getOperand(1).getReg() == AArch64::XZR) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI.getOperand(2).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX:

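    // Operand 3 packs the extend type and the "do shift" bit; e.g.
    // "ldr x0, [x1, w2, sxtw]" and "ldr x0, [x1, x2, lsl #3]" are
    // scaled/extended, while plain "ldr x0, [x1, x2]" is not.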
    unsigned Val = MI.getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
  return any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOSuppressPair;
  });
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
  if (MI.memoperands_empty())
    return;
  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
}

bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
  return isUnscaledLdSt(MI.getOpcode());
}

// Is this a candidate for ld/st merging or pairing?  For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
  // If this is a volatile load/store, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
  if (!MI.getOperand(2).isImm())
    return false;

  // Can't merge/pair if the instruction modifies the base register.
  // e.g., ldr x0, [x0]
  unsigned BaseReg = MI.getOperand(1).getReg();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (MI.modifiesRegister(BaseReg, TRI))
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (isLdStPairSuppressed(MI))
    return false;

  // On some CPUs quad load/store pairs are slower than two single load/stores.
  if (Subtarget.avoidQuadLdStPairs()) {
    switch (MI.getOpcode()) {
    default:
      break;
    case AArch64::LDURQi:
    case AArch64::STURQi:
    case AArch64::LDRQui:
    case AArch64::STRQui:
      return false;
    }
  }

  return true;
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
    const TargetRegisterInfo *TRI) const {
  unsigned Width;
  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt.getNumExplicitOperands() == 3) {
    // Non-paired instruction (e.g., ldr x1, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
      return false;
  } else if (LdSt.getNumExplicitOperands() == 4) {
    // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
        !LdSt.getOperand(3).isImm())
      return false;
  } else
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
  unsigned Scale = 0;
  switch (LdSt.getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDPQi:
  case AArch64::LDNPQi:
  case AArch64::STPQi:
  case AArch64::STNPQi:
    Scale = 16;
    Width = 32;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    Scale = 8;
    Width = 16;
    break;
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    Scale = 4;
    Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::LDRSWui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  }

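  // Example: for "ldp x1, x2, [x0, #16]" (LDPXi) the immediate operand is 2
  // and Scale is 8, so Offset = 16 bytes; Width covers both registers, so it
  // is 16 bytes as well.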
  if (LdSt.getNumExplicitOperands() == 3) {
    BaseReg = LdSt.getOperand(1).getReg();
    Offset = LdSt.getOperand(2).getImm() * Scale;
  } else {
    assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
    BaseReg = LdSt.getOperand(2).getReg();
    Offset = LdSt.getOperand(3).getImm() * Scale;
  }
  return true;
}

// Scale the unscaled offsets.  Returns false if the unscaled offset can't be
// scaled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {
  unsigned OffsetStride = 1;
  switch (Opc) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    OffsetStride = 16;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    OffsetStride = 8;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    OffsetStride = 4;
    break;
  }
  // If the byte-offset isn't a multiple of the stride, we can't scale this
  // offset.
  if (Offset % OffsetStride != 0)
    return false;

  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
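  // e.g. an LDURXi at byte offset 24 becomes element offset 3 (stride 8).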
  Offset /= OffsetStride;
  return true;
}

static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
  if (FirstOpc == SecondOpc)
    return true;
  // We can also pair sign-ext and zero-ext instructions.
  switch (FirstOpc) {
  default:
    return false;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
  }
  // These instructions can't be paired based on their opcodes.
  return false;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                           MachineInstr &SecondLdSt,
                                           unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;

  if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
    return false;

  // Can we pair these instructions based on their opcodes?
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  if (!canPairLdStOpc(FirstOpc, SecondOpc))
    return false;

  // Can't merge volatiles or load/stores that have a hint to avoid pair
  // formation, for example.
  if (!isCandidateToMergeOrPair(FirstLdSt) ||
      !isCandidateToMergeOrPair(SecondLdSt))
    return false;

  // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
    return false;

  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
    return false;

  // Pairwise instructions have a 7-bit signed offset field.
  if (Offset1 > 63 || Offset1 < -64)
    return false;

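  // Clusterable accesses have consecutive scaled offsets; e.g.
  // "ldr x1, [x0, #8]" and "ldr x2, [x0, #16]" have element offsets 1 and 2.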
  // The caller should already have ordered First/SecondLdSt by offset.
  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + 1 == Offset2;
}

bool AArch64InstrInfo::shouldScheduleAdjacent(
    const MachineInstr &First, const MachineInstr &Second) const {
  if (Subtarget.hasArithmeticBccFusion()) {
    // Fuse CMN, CMP, TST followed by Bcc.
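    // Keeping the flag-setting instruction immediately before the branch lets
    // fusing cores issue the pair as one macro-op, e.g. "subs w0, w1, w2"
    // followed by "b.eq target".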
    unsigned SecondOpcode = Second.getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First.getOpcode()) {
      default:
        return false;
      case AArch64::ADDSWri:
      case AArch64::ADDSWrr:
      case AArch64::ADDSXri:
      case AArch64::ADDSXrr:
      case AArch64::ANDSWri:
      case AArch64::ANDSWrr:
      case AArch64::ANDSXri:
      case AArch64::ANDSXrr:
      case AArch64::SUBSWri:
      case AArch64::SUBSWrr:
      case AArch64::SUBSXri:
      case AArch64::SUBSXrr:
      case AArch64::BICSWrr:
      case AArch64::BICSXrr:
        return true;
      case AArch64::ADDSWrs:
      case AArch64::ADDSXrs:
      case AArch64::ANDSWrs:
      case AArch64::ANDSXrs:
      case AArch64::SUBSWrs:
      case AArch64::SUBSXrs:
      case AArch64::BICSWrs:
      case AArch64::BICSXrs:
        // Shift value can be 0 making these behave like the "rr" variant...
        return !hasShiftedReg(Second);
      }
    }
  }
  if (Subtarget.hasArithmeticCbzFusion()) {
    // Fuse ALU operations followed by CBZ/CBNZ.
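    // e.g. "add w8, w8, #1" followed by "cbnz w8, loop" can issue as a
    // single macro-op on such cores.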
    unsigned SecondOpcode = Second.getOpcode();
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First.getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDWrr:
      case AArch64::ADDXri:
      case AArch64::ADDXrr:
      case AArch64::ANDWri:
      case AArch64::ANDWrr:
      case AArch64::ANDXri:
      case AArch64::ANDXrr:
      case AArch64::EORWri:
      case AArch64::EORWrr:
      case AArch64::EORXri:
      case AArch64::EORXrr:
      case AArch64::ORRWri:
      case AArch64::ORRWrr:
      case AArch64::ORRXri:
      case AArch64::ORRXrr:
      case AArch64::SUBWri:
      case AArch64::SUBWrr:
      case AArch64::SUBXri:
      case AArch64::SUBXrr:
        return true;
      case AArch64::ADDWrs:
      case AArch64::ADDXrs:
      case AArch64::ANDWrs:
      case AArch64::ANDXrs:
      case AArch64::SUBWrs:
      case AArch64::SUBXrs:
      case AArch64::BICWrs:
      case AArch64::BICXrs:
        // Shift value can be 0 making these behave like the "rr" variant...
        return !hasShiftedReg(Second);
      }
    }
  }
  return false;
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, const DebugLoc &DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
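  // e.g. with DestReg encoding 2, SrcReg encoding 1 and NumRegs 3,
  // (2 - 1) & 0x1f == 1 < 3: a forward sub-register copy would overwrite a
  // source lane before reading it, so the caller copies backwards instead.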
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

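  // Register tuples (DD..DDDD, QQ..QQQQ) have no single move instruction, so
  // they are copied one sub-register at a time with vector ORR; e.g. a QQ
  // copy becomes two "orr vD.16b, vS.16b, vS.16b" instructions.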
  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
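  // Select a store opcode from the spill size in bytes.  Register tuples
  // (DD..DDDD, QQ..QQQQ) are spilled with ST1 multi-vector stores, which
  // take no immediate offset (Offset is cleared for those cases below).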
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
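  // Opcode selection mirrors storeRegToStackSlot above, using the matching
  // LDR/LD1 forms for each spill size.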
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
         "SP increment/decrement not 16-byte aligned");

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
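  // Emit the offset in chunks: each ADD/SUB immediate encodes 12 bits,
  // optionally shifted left by 12; e.g. Offset = 0x12345 is lowered roughly
  // as "add xd, xn, #0x12, lsl #12" followed by "add xd, xd, #0x345".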
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}

MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI.isCopy()) {
    unsigned DstReg = MI.getOperand(0).getReg();
    unsigned SrcReg = MI.getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Handle the case where a WZR/XZR copy is being spilled but the destination
  // register class doesn't contain WZR/XZR. For example:
  //
  //   %vreg0<def> = COPY %XZR; GPR64common:%vreg0
  //
  // In this case we can still safely fold away the COPY and generate the
  // following spill code:
  //
  //   STRXui %XZR, <fi#0>
  //
  if (MI.isFullCopy() && Ops.size() == 1 && Ops[0] == 0) {
    MachineBasicBlock &MBB = *MI.getParent();
    const MachineOperand &SrcMO = MI.getOperand(1);
    unsigned SrcReg = SrcMO.getReg();
    if (SrcReg == AArch64::WZR || SrcReg == AArch64::XZR) {
      const TargetRegisterInfo &TRI = getRegisterInfo();
      const TargetRegisterClass &RC = SrcReg == AArch64::WZR
                                          ? AArch64::GPR32RegClass
                                          : AArch64::GPR64RegClass;
      storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
                          &RC, &TRI);
      return &*--InsertPt;
    }
  }

  // Cannot fold.
  return nullptr;
}

2630int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
2631 bool *OutUseUnscaledOp,
2632 unsigned *OutUnscaledOp,
2633 int *EmittableOffset) {
2634 int Scale = 1;
2635 bool IsSigned = false;
 2636 // The immediate operand is at index 2 for most opcodes; the cases below
 // override ImmIdx when it is not.
2637 unsigned ImmIdx = 2;
2638 unsigned UnscaledOp = 0;
2639 // Set output values in case of early exit.
2640 if (EmittableOffset)
2641 *EmittableOffset = 0;
2642 if (OutUseUnscaledOp)
2643 *OutUseUnscaledOp = false;
2644 if (OutUnscaledOp)
2645 *OutUnscaledOp = 0;
2646 switch (MI.getOpcode()) {
2647 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002648 llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
Tim Northover3b0846e2014-05-24 12:50:23 +00002649 // Vector spills/fills can't take an immediate offset.
2650 case AArch64::LD1Twov2d:
2651 case AArch64::LD1Threev2d:
2652 case AArch64::LD1Fourv2d:
2653 case AArch64::LD1Twov1d:
2654 case AArch64::LD1Threev1d:
2655 case AArch64::LD1Fourv1d:
2656 case AArch64::ST1Twov2d:
2657 case AArch64::ST1Threev2d:
2658 case AArch64::ST1Fourv2d:
2659 case AArch64::ST1Twov1d:
2660 case AArch64::ST1Threev1d:
2661 case AArch64::ST1Fourv1d:
2662 return AArch64FrameOffsetCannotUpdate;
2663 case AArch64::PRFMui:
2664 Scale = 8;
2665 UnscaledOp = AArch64::PRFUMi;
2666 break;
2667 case AArch64::LDRXui:
2668 Scale = 8;
2669 UnscaledOp = AArch64::LDURXi;
2670 break;
2671 case AArch64::LDRWui:
2672 Scale = 4;
2673 UnscaledOp = AArch64::LDURWi;
2674 break;
2675 case AArch64::LDRBui:
2676 Scale = 1;
2677 UnscaledOp = AArch64::LDURBi;
2678 break;
2679 case AArch64::LDRHui:
2680 Scale = 2;
2681 UnscaledOp = AArch64::LDURHi;
2682 break;
2683 case AArch64::LDRSui:
2684 Scale = 4;
2685 UnscaledOp = AArch64::LDURSi;
2686 break;
2687 case AArch64::LDRDui:
2688 Scale = 8;
2689 UnscaledOp = AArch64::LDURDi;
2690 break;
2691 case AArch64::LDRQui:
2692 Scale = 16;
2693 UnscaledOp = AArch64::LDURQi;
2694 break;
2695 case AArch64::LDRBBui:
2696 Scale = 1;
2697 UnscaledOp = AArch64::LDURBBi;
2698 break;
2699 case AArch64::LDRHHui:
2700 Scale = 2;
2701 UnscaledOp = AArch64::LDURHHi;
2702 break;
2703 case AArch64::LDRSBXui:
2704 Scale = 1;
2705 UnscaledOp = AArch64::LDURSBXi;
2706 break;
2707 case AArch64::LDRSBWui:
2708 Scale = 1;
2709 UnscaledOp = AArch64::LDURSBWi;
2710 break;
2711 case AArch64::LDRSHXui:
2712 Scale = 2;
2713 UnscaledOp = AArch64::LDURSHXi;
2714 break;
2715 case AArch64::LDRSHWui:
2716 Scale = 2;
2717 UnscaledOp = AArch64::LDURSHWi;
2718 break;
2719 case AArch64::LDRSWui:
2720 Scale = 4;
2721 UnscaledOp = AArch64::LDURSWi;
2722 break;
2723
2724 case AArch64::STRXui:
2725 Scale = 8;
2726 UnscaledOp = AArch64::STURXi;
2727 break;
2728 case AArch64::STRWui:
2729 Scale = 4;
2730 UnscaledOp = AArch64::STURWi;
2731 break;
2732 case AArch64::STRBui:
2733 Scale = 1;
2734 UnscaledOp = AArch64::STURBi;
2735 break;
2736 case AArch64::STRHui:
2737 Scale = 2;
2738 UnscaledOp = AArch64::STURHi;
2739 break;
2740 case AArch64::STRSui:
2741 Scale = 4;
2742 UnscaledOp = AArch64::STURSi;
2743 break;
2744 case AArch64::STRDui:
2745 Scale = 8;
2746 UnscaledOp = AArch64::STURDi;
2747 break;
2748 case AArch64::STRQui:
2749 Scale = 16;
2750 UnscaledOp = AArch64::STURQi;
2751 break;
2752 case AArch64::STRBBui:
2753 Scale = 1;
2754 UnscaledOp = AArch64::STURBBi;
2755 break;
2756 case AArch64::STRHHui:
2757 Scale = 2;
2758 UnscaledOp = AArch64::STURHHi;
2759 break;
2760
2761 case AArch64::LDPXi:
2762 case AArch64::LDPDi:
2763 case AArch64::STPXi:
2764 case AArch64::STPDi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002765 case AArch64::LDNPXi:
2766 case AArch64::LDNPDi:
2767 case AArch64::STNPXi:
2768 case AArch64::STNPDi:
2769 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002770 IsSigned = true;
2771 Scale = 8;
2772 break;
2773 case AArch64::LDPQi:
2774 case AArch64::STPQi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002775 case AArch64::LDNPQi:
2776 case AArch64::STNPQi:
2777 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002778 IsSigned = true;
2779 Scale = 16;
2780 break;
2781 case AArch64::LDPWi:
2782 case AArch64::LDPSi:
2783 case AArch64::STPWi:
2784 case AArch64::STPSi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002785 case AArch64::LDNPWi:
2786 case AArch64::LDNPSi:
2787 case AArch64::STNPWi:
2788 case AArch64::STNPSi:
2789 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002790 IsSigned = true;
2791 Scale = 4;
2792 break;
2793
2794 case AArch64::LDURXi:
2795 case AArch64::LDURWi:
2796 case AArch64::LDURBi:
2797 case AArch64::LDURHi:
2798 case AArch64::LDURSi:
2799 case AArch64::LDURDi:
2800 case AArch64::LDURQi:
2801 case AArch64::LDURHHi:
2802 case AArch64::LDURBBi:
2803 case AArch64::LDURSBXi:
2804 case AArch64::LDURSBWi:
2805 case AArch64::LDURSHXi:
2806 case AArch64::LDURSHWi:
2807 case AArch64::LDURSWi:
2808 case AArch64::STURXi:
2809 case AArch64::STURWi:
2810 case AArch64::STURBi:
2811 case AArch64::STURHi:
2812 case AArch64::STURSi:
2813 case AArch64::STURDi:
2814 case AArch64::STURQi:
2815 case AArch64::STURBBi:
2816 case AArch64::STURHHi:
2817 Scale = 1;
2818 break;
2819 }
2820
2821 Offset += MI.getOperand(ImmIdx).getImm() * Scale;
2822
2823 bool useUnscaledOp = false;
 2824 // Rewrite to the unscaled instruction when the offset is not a multiple of
 2825 // the scale, or when the offset is negative and an unscaled variant exists.
2827 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
2828 useUnscaledOp = true;
2829
 2830 // Pick the immediate width for the addressing mode: 7-bit signed scaled for
 2831 // pairs, 9-bit signed for unscaled ops, or 12-bit unsigned for scaled ops.
2832 unsigned MaskBits;
2833 if (IsSigned) {
2834 // ldp/stp instructions.
2835 MaskBits = 7;
2836 Offset /= Scale;
2837 } else if (UnscaledOp == 0 || useUnscaledOp) {
2838 MaskBits = 9;
2839 IsSigned = true;
2840 Scale = 1;
2841 } else {
2842 MaskBits = 12;
2843 IsSigned = false;
2844 Offset /= Scale;
2845 }
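  // Worked example (hypothetical operands): for LDRXui, Scale is 8 and
  // UnscaledOp is LDURXi. An incoming byte offset of 4 is not a multiple of
  // 8, so useUnscaledOp was set above, and 4 is checked against the signed
  // 9-bit unscaled range [-256, 255] rather than the scaled unsigned range.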
2846
2847 // Attempt to fold address computation.
2848 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
2849 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
2850 if (Offset >= MinOff && Offset <= MaxOff) {
2851 if (EmittableOffset)
2852 *EmittableOffset = Offset;
2853 Offset = 0;
2854 } else {
2855 int NewOff = Offset < 0 ? MinOff : MaxOff;
2856 if (EmittableOffset)
2857 *EmittableOffset = NewOff;
2858 Offset = (Offset - NewOff) * Scale;
2859 }
2860 if (OutUseUnscaledOp)
2861 *OutUseUnscaledOp = useUnscaledOp;
2862 if (OutUnscaledOp)
2863 *OutUnscaledOp = UnscaledOp;
2864 return AArch64FrameOffsetCanUpdate |
2865 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
2866}
2867
2868bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2869 unsigned FrameReg, int &Offset,
2870 const AArch64InstrInfo *TII) {
2871 unsigned Opcode = MI.getOpcode();
2872 unsigned ImmIdx = FrameRegIdx + 1;
2873
2874 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
2875 Offset += MI.getOperand(ImmIdx).getImm();
2876 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
2877 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
2878 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
2879 MI.eraseFromParent();
2880 Offset = 0;
2881 return true;
2882 }
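  // For illustration (hypothetical frame layout): if the frame index
  // resolves to SP + 32, then "%x0 = ADDXri <fi>, 16" folds its own
  // immediate into Offset and is re-emitted through emitFrameOffset above
  // as "add x0, sp, #48".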
2883
2884 int NewOffset;
2885 unsigned UnscaledOp;
2886 bool UseUnscaledOp;
2887 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
2888 &UnscaledOp, &NewOffset);
2889 if (Status & AArch64FrameOffsetCanUpdate) {
2890 if (Status & AArch64FrameOffsetIsLegal)
2891 // Replace the FrameIndex with FrameReg.
2892 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2893 if (UseUnscaledOp)
2894 MI.setDesc(TII->get(UnscaledOp));
2895
2896 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
2897 return Offset == 0;
2898 }
2899
2900 return false;
2901}
2902
2903void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
2904 NopInst.setOpcode(AArch64::HINT);
Jim Grosbache9119e42015-05-13 18:37:00 +00002905 NopInst.addOperand(MCOperand::createImm(0));
Tim Northover3b0846e2014-05-24 12:50:23 +00002906}
Chad Rosier9d1a5562016-05-02 14:56:21 +00002907
2908// AArch64 supports MachineCombiner.
Benjamin Kramer8c90fd72014-09-03 11:41:21 +00002909bool AArch64InstrInfo::useMachineCombiner() const {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002911 return true;
2912}
2913//
 2914 // True when Opc sets flags.
2915static bool isCombineInstrSettingFlag(unsigned Opc) {
2916 switch (Opc) {
2917 case AArch64::ADDSWrr:
2918 case AArch64::ADDSWri:
2919 case AArch64::ADDSXrr:
2920 case AArch64::ADDSXri:
2921 case AArch64::SUBSWrr:
2922 case AArch64::SUBSXrr:
2923 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2924 case AArch64::SUBSWri:
2925 case AArch64::SUBSXri:
2926 return true;
2927 default:
2928 break;
2929 }
2930 return false;
2931}
2932//
2933// 32b Opcodes that can be combined with a MUL
2934static bool isCombineInstrCandidate32(unsigned Opc) {
2935 switch (Opc) {
2936 case AArch64::ADDWrr:
2937 case AArch64::ADDWri:
2938 case AArch64::SUBWrr:
2939 case AArch64::ADDSWrr:
2940 case AArch64::ADDSWri:
2941 case AArch64::SUBSWrr:
2942 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2943 case AArch64::SUBWri:
2944 case AArch64::SUBSWri:
2945 return true;
2946 default:
2947 break;
2948 }
2949 return false;
2950}
2951//
2952// 64b Opcodes that can be combined with a MUL
2953static bool isCombineInstrCandidate64(unsigned Opc) {
2954 switch (Opc) {
2955 case AArch64::ADDXrr:
2956 case AArch64::ADDXri:
2957 case AArch64::SUBXrr:
2958 case AArch64::ADDSXrr:
2959 case AArch64::ADDSXri:
2960 case AArch64::SUBSXrr:
2961 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2962 case AArch64::SUBXri:
2963 case AArch64::SUBSXri:
2964 return true;
2965 default:
2966 break;
2967 }
2968 return false;
2969}
2970//
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00002971 // FP Opcodes that can be combined with an FMUL
2972static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
2973 switch (Inst.getOpcode()) {
Evandro Menezes19b2aed2016-09-15 19:55:23 +00002974 default:
2975 break;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00002976 case AArch64::FADDSrr:
2977 case AArch64::FADDDrr:
2978 case AArch64::FADDv2f32:
2979 case AArch64::FADDv2f64:
2980 case AArch64::FADDv4f32:
2981 case AArch64::FSUBSrr:
2982 case AArch64::FSUBDrr:
2983 case AArch64::FSUBv2f32:
2984 case AArch64::FSUBv2f64:
2985 case AArch64::FSUBv4f32:
Evandro Menezes19b2aed2016-09-15 19:55:23 +00002986 TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
2987 return (Options.UnsafeFPMath ||
2988 Options.AllowFPOpFusion == FPOpFusion::Fast);
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00002989 }
2990 return false;
2991}
2992//
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002993// Opcodes that can be combined with a MUL
2994static bool isCombineInstrCandidate(unsigned Opc) {
2995 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
2996}
2997
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00002998//
 2999 // Utility routine that checks if \p MO is defined by a
 3000 // \p CombineOpc instruction in the basic block \p MBB.
3001static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
3002 unsigned CombineOpc, unsigned ZeroReg = 0,
3003 bool CheckZeroReg = false) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003004 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3005 MachineInstr *MI = nullptr;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003006
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003007 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3008 MI = MRI.getUniqueVRegDef(MO.getReg());
3009 // And it needs to be in the trace (otherwise, it won't have a depth).
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003010 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003011 return false;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003012 // Must only be used by the user we combine with.
Gerolf Hoflehnerfe2c11f2014-08-13 22:07:36 +00003013 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003014 return false;
3015
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003016 if (CheckZeroReg) {
3017 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
3018 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
3019 MI->getOperand(3).isReg() && "MAdd/MSub must have a least 4 regs");
3020 // The third input reg must be zero.
3021 if (MI->getOperand(3).getReg() != ZeroReg)
3022 return false;
3023 }
3024
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003025 return true;
3026}
3027
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003028//
3029// Is \param MO defined by an integer multiply and can be combined?
3030static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3031 unsigned MulOpc, unsigned ZeroReg) {
3032 return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3033}
3034
3035//
 3036 // Is \p MO defined by a floating-point multiply, and can it be combined?
3037static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3038 unsigned MulOpc) {
3039 return canCombine(MBB, MO, MulOpc);
3040}
3041
Haicheng Wu08b94622016-01-07 04:01:02 +00003042// TODO: There are many more machine instruction opcodes to match:
3043// 1. Other data types (integer, vectors)
3044// 2. Other math / logic operations (xor, or)
3045// 3. Other forms of the same operation (intrinsics and other variants)
3046bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
3047 switch (Inst.getOpcode()) {
3048 case AArch64::FADDDrr:
3049 case AArch64::FADDSrr:
3050 case AArch64::FADDv2f32:
3051 case AArch64::FADDv2f64:
3052 case AArch64::FADDv4f32:
3053 case AArch64::FMULDrr:
3054 case AArch64::FMULSrr:
3055 case AArch64::FMULX32:
3056 case AArch64::FMULX64:
3057 case AArch64::FMULXv2f32:
3058 case AArch64::FMULXv2f64:
3059 case AArch64::FMULXv4f32:
3060 case AArch64::FMULv2f32:
3061 case AArch64::FMULv2f64:
3062 case AArch64::FMULv4f32:
3063 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
3064 default:
3065 return false;
3066 }
3067}
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003068
Haicheng Wu08b94622016-01-07 04:01:02 +00003069/// Find instructions that can be turned into madd.
3070static bool getMaddPatterns(MachineInstr &Root,
3071 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003072 unsigned Opc = Root.getOpcode();
3073 MachineBasicBlock &MBB = *Root.getParent();
3074 bool Found = false;
3075
3076 if (!isCombineInstrCandidate(Opc))
Chad Rosier85c85942016-03-23 20:07:28 +00003077 return false;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003078 if (isCombineInstrSettingFlag(Opc)) {
3079 int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
 3080 // When NZCV is live, bail out.
3081 if (Cmp_NZCV == -1)
Chad Rosier85c85942016-03-23 20:07:28 +00003082 return false;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003083 unsigned NewOpc = convertFlagSettingOpcode(Root);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003084 // When the opcode can't change, bail out.
3085 // CHECKME: do we miss any cases for opcode conversion?
3086 if (NewOpc == Opc)
Chad Rosier85c85942016-03-23 20:07:28 +00003087 return false;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003088 Opc = NewOpc;
3089 }
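  // For example (assuming convertFlagSettingOpcode maps each S-form to its
  // non-flag-setting twin): with NZCV dead, ADDSWrr is treated as ADDWrr
  // here, so a MUL/ADDS pair can still become a MADD, which never sets
  // flags.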
3090
3091 switch (Opc) {
3092 default:
3093 break;
3094 case AArch64::ADDWrr:
3095 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3096 "ADDWrr does not have register operands");
3097 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3098 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003099 Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003100 Found = true;
3101 }
3102 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3103 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003104 Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003105 Found = true;
3106 }
3107 break;
3108 case AArch64::ADDXrr:
3109 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3110 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003111 Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003112 Found = true;
3113 }
3114 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3115 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003116 Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003117 Found = true;
3118 }
3119 break;
3120 case AArch64::SUBWrr:
3121 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3122 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003123 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003124 Found = true;
3125 }
3126 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3127 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003128 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003129 Found = true;
3130 }
3131 break;
3132 case AArch64::SUBXrr:
3133 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3134 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003135 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003136 Found = true;
3137 }
3138 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3139 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003140 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003141 Found = true;
3142 }
3143 break;
3144 case AArch64::ADDWri:
3145 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3146 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003147 Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003148 Found = true;
3149 }
3150 break;
3151 case AArch64::ADDXri:
3152 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3153 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003154 Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003155 Found = true;
3156 }
3157 break;
3158 case AArch64::SUBWri:
3159 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3160 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003161 Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003162 Found = true;
3163 }
3164 break;
3165 case AArch64::SUBXri:
3166 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3167 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003168 Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003169 Found = true;
3170 }
3171 break;
3172 }
3173 return Found;
3174}
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003175/// Floating-Point Support
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003176
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003177/// Find instructions that can be turned into madd.
3178static bool getFMAPatterns(MachineInstr &Root,
3179 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3180
3181 if (!isCombineInstrCandidateFP(Root))
 3182 return false;
3183
3184 MachineBasicBlock &MBB = *Root.getParent();
3185 bool Found = false;
3186
3187 switch (Root.getOpcode()) {
3188 default:
3189 assert(false && "Unsupported FP instruction in combiner\n");
3190 break;
3191 case AArch64::FADDSrr:
3192 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3193 "FADDWrr does not have register operands");
3194 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3195 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
3196 Found = true;
3197 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3198 AArch64::FMULv1i32_indexed)) {
3199 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
3200 Found = true;
3201 }
3202 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3203 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
3204 Found = true;
3205 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3206 AArch64::FMULv1i32_indexed)) {
3207 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
3208 Found = true;
3209 }
3210 break;
3211 case AArch64::FADDDrr:
3212 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3213 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
3214 Found = true;
3215 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3216 AArch64::FMULv1i64_indexed)) {
3217 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
3218 Found = true;
3219 }
3220 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3221 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
3222 Found = true;
3223 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3224 AArch64::FMULv1i64_indexed)) {
3225 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
3226 Found = true;
3227 }
3228 break;
3229 case AArch64::FADDv2f32:
3230 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3231 AArch64::FMULv2i32_indexed)) {
3232 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
3233 Found = true;
3234 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3235 AArch64::FMULv2f32)) {
3236 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
3237 Found = true;
3238 }
3239 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3240 AArch64::FMULv2i32_indexed)) {
3241 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
3242 Found = true;
3243 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3244 AArch64::FMULv2f32)) {
3245 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
3246 Found = true;
3247 }
3248 break;
3249 case AArch64::FADDv2f64:
3250 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3251 AArch64::FMULv2i64_indexed)) {
3252 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
3253 Found = true;
3254 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3255 AArch64::FMULv2f64)) {
3256 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
3257 Found = true;
3258 }
3259 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3260 AArch64::FMULv2i64_indexed)) {
3261 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
3262 Found = true;
3263 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3264 AArch64::FMULv2f64)) {
3265 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
3266 Found = true;
3267 }
3268 break;
3269 case AArch64::FADDv4f32:
3270 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3271 AArch64::FMULv4i32_indexed)) {
3272 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
3273 Found = true;
3274 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3275 AArch64::FMULv4f32)) {
3276 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
3277 Found = true;
3278 }
3279 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3280 AArch64::FMULv4i32_indexed)) {
3281 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
3282 Found = true;
3283 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3284 AArch64::FMULv4f32)) {
3285 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
3286 Found = true;
3287 }
3288 break;
3289
3290 case AArch64::FSUBSrr:
3291 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3292 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
3293 Found = true;
3294 }
3295 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3296 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
3297 Found = true;
3298 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3299 AArch64::FMULv1i32_indexed)) {
3300 Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
3301 Found = true;
3302 }
3303 break;
3304 case AArch64::FSUBDrr:
3305 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3306 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
3307 Found = true;
3308 }
3309 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3310 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
3311 Found = true;
3312 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3313 AArch64::FMULv1i64_indexed)) {
3314 Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
3315 Found = true;
3316 }
3317 break;
3318 case AArch64::FSUBv2f32:
3319 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3320 AArch64::FMULv2i32_indexed)) {
3321 Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
3322 Found = true;
3323 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3324 AArch64::FMULv2f32)) {
3325 Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
3326 Found = true;
3327 }
3328 break;
3329 case AArch64::FSUBv2f64:
3330 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3331 AArch64::FMULv2i64_indexed)) {
3332 Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
3333 Found = true;
3334 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3335 AArch64::FMULv2f64)) {
3336 Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
3337 Found = true;
3338 }
3339 break;
3340 case AArch64::FSUBv4f32:
3341 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3342 AArch64::FMULv4i32_indexed)) {
3343 Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
3344 Found = true;
3345 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3346 AArch64::FMULv4f32)) {
3347 Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
3348 Found = true;
3349 }
3350 break;
3351 }
3352 return Found;
3353}
3354
3355/// Return true when a code sequence can improve throughput. It
3356/// should be called only for instructions in loops.
3357/// \param Pattern - combiner pattern
3358bool
3359AArch64InstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
3360 switch (Pattern) {
3361 default:
3362 break;
3363 case MachineCombinerPattern::FMULADDS_OP1:
3364 case MachineCombinerPattern::FMULADDS_OP2:
3365 case MachineCombinerPattern::FMULSUBS_OP1:
3366 case MachineCombinerPattern::FMULSUBS_OP2:
3367 case MachineCombinerPattern::FMULADDD_OP1:
3368 case MachineCombinerPattern::FMULADDD_OP2:
3369 case MachineCombinerPattern::FMULSUBD_OP1:
3370 case MachineCombinerPattern::FMULSUBD_OP2:
3371 case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
3372 case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
3373 case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
3374 case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
3375 case MachineCombinerPattern::FMLAv2f32_OP2:
3376 case MachineCombinerPattern::FMLAv2f32_OP1:
3377 case MachineCombinerPattern::FMLAv2f64_OP1:
3378 case MachineCombinerPattern::FMLAv2f64_OP2:
3379 case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
3380 case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
3381 case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
3382 case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
3383 case MachineCombinerPattern::FMLAv4f32_OP1:
3384 case MachineCombinerPattern::FMLAv4f32_OP2:
3385 case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
3386 case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
3387 case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
3388 case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
3389 case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
3390 case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
3391 case MachineCombinerPattern::FMLSv2f32_OP2:
3392 case MachineCombinerPattern::FMLSv2f64_OP2:
3393 case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
3394 case MachineCombinerPattern::FMLSv4f32_OP2:
3395 return true;
3396 } // end switch (Pattern)
3397 return false;
3398}
Haicheng Wu08b94622016-01-07 04:01:02 +00003399/// Return true when there is potentially a faster code sequence for an
3400/// instruction chain ending in \p Root. All potential patterns are listed in
 3401 /// the \p Patterns vector. Patterns should be sorted in priority order since the
3402/// pattern evaluator stops checking as soon as it finds a faster sequence.
3403
3404bool AArch64InstrInfo::getMachineCombinerPatterns(
3405 MachineInstr &Root,
3406 SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003407 // Integer patterns
Haicheng Wu08b94622016-01-07 04:01:02 +00003408 if (getMaddPatterns(Root, Patterns))
3409 return true;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003410 // Floating point patterns
3411 if (getFMAPatterns(Root, Patterns))
3412 return true;
Haicheng Wu08b94622016-01-07 04:01:02 +00003413
3414 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
3415}
3416
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003417enum class FMAInstKind { Default, Indexed, Accumulator };
3418/// genFusedMultiply - Generate fused multiply instructions.
3419/// This function supports both integer and floating point instructions.
3420/// A typical example:
3421/// F|MUL I=A,B,0
3422/// F|ADD R,I,C
3423/// ==> F|MADD R,A,B,C
3424/// \param Root is the F|ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00003425/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003426/// contain the generated madd instruction
3427/// \param IdxMulOpd is index of operand in Root that is the result of
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003428/// the F|MUL. In the example above IdxMulOpd is 1.
 3429 /// \param MaddOpc the opcode of the f|madd instruction
3430static MachineInstr *
3431genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
3432 const TargetInstrInfo *TII, MachineInstr &Root,
3433 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
3434 unsigned MaddOpc, const TargetRegisterClass *RC,
3435 FMAInstKind kind = FMAInstKind::Default) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003436 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3437
3438 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
3439 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003440 unsigned ResultReg = Root.getOperand(0).getReg();
3441 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3442 bool Src0IsKill = MUL->getOperand(1).isKill();
3443 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3444 bool Src1IsKill = MUL->getOperand(2).isKill();
3445 unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
3446 bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
3447
3448 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3449 MRI.constrainRegClass(ResultReg, RC);
3450 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3451 MRI.constrainRegClass(SrcReg0, RC);
3452 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3453 MRI.constrainRegClass(SrcReg1, RC);
3454 if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
3455 MRI.constrainRegClass(SrcReg2, RC);
3456
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003457 MachineInstrBuilder MIB;
3458 if (kind == FMAInstKind::Default)
3459 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3460 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3461 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3462 .addReg(SrcReg2, getKillRegState(Src2IsKill));
3463 else if (kind == FMAInstKind::Indexed)
3464 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3465 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3466 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3467 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3468 .addImm(MUL->getOperand(3).getImm());
3469 else if (kind == FMAInstKind::Accumulator)
3470 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3471 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3472 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3473 .addReg(SrcReg1, getKillRegState(Src1IsKill));
3474 else
3475 assert(false && "Invalid FMA instruction kind \n");
3476 // Insert the MADD (MADD, FMA, FMS, FMLA, FMSL)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003477 InsInstrs.push_back(MIB);
3478 return MUL;
3479}
3480
 3481 /// genMaddR - Generate a madd instruction, combining the mul and add using
 3482 /// an extra virtual register.
3483/// Example - an ADD intermediate needs to be stored in a register:
3484/// MUL I=A,B,0
3485/// ADD R,I,Imm
3486/// ==> ORR V, ZR, Imm
3487/// ==> MADD R,A,B,V
3488/// \param Root is the ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00003489/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003490/// contain the generated madd instruction
3491/// \param IdxMulOpd is index of operand in Root that is the result of
3492/// the MUL. In the example above IdxMulOpd is 1.
3493/// \param MaddOpc the opcode fo the madd instruction
3494/// \param VR is a virtual register that holds the value of an ADD operand
3495/// (V in the example above).
3496static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
3497 const TargetInstrInfo *TII, MachineInstr &Root,
3498 SmallVectorImpl<MachineInstr *> &InsInstrs,
3499 unsigned IdxMulOpd, unsigned MaddOpc,
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003500 unsigned VR, const TargetRegisterClass *RC) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003501 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3502
3503 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003504 unsigned ResultReg = Root.getOperand(0).getReg();
3505 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3506 bool Src0IsKill = MUL->getOperand(1).isKill();
3507 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3508 bool Src1IsKill = MUL->getOperand(2).isKill();
3509
3510 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3511 MRI.constrainRegClass(ResultReg, RC);
3512 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3513 MRI.constrainRegClass(SrcReg0, RC);
3514 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3515 MRI.constrainRegClass(SrcReg1, RC);
3516 if (TargetRegisterInfo::isVirtualRegister(VR))
3517 MRI.constrainRegClass(VR, RC);
3518
3519 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
3520 ResultReg)
3521 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3522 .addReg(SrcReg1, getKillRegState(Src1IsKill))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003523 .addReg(VR);
3524 // Insert the MADD
3525 InsInstrs.push_back(MIB);
3526 return MUL;
3527}
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003528
Sanjay Patelcfe03932015-06-19 23:21:42 +00003529/// When getMachineCombinerPatterns() finds potential patterns,
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003530/// this function generates the instructions that could replace the
 3531 /// original code sequence.
3532void AArch64InstrInfo::genAlternativeCodeSequence(
Sanjay Patel387e66e2015-11-05 19:34:57 +00003533 MachineInstr &Root, MachineCombinerPattern Pattern,
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003534 SmallVectorImpl<MachineInstr *> &InsInstrs,
3535 SmallVectorImpl<MachineInstr *> &DelInstrs,
3536 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
3537 MachineBasicBlock &MBB = *Root.getParent();
3538 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3539 MachineFunction &MF = *MBB.getParent();
Eric Christophere0818912014-09-03 20:36:26 +00003540 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003541
3542 MachineInstr *MUL;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003543 const TargetRegisterClass *RC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003544 unsigned Opc;
3545 switch (Pattern) {
3546 default:
Haicheng Wu08b94622016-01-07 04:01:02 +00003547 // Reassociate instructions.
3548 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
3549 DelInstrs, InstrIdxForVirtReg);
3550 return;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003551 case MachineCombinerPattern::MULADDW_OP1:
3552 case MachineCombinerPattern::MULADDX_OP1:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003553 // MUL I=A,B,0
3554 // ADD R,I,C
3555 // ==> MADD R,A,B,C
3556 // --- Create(MADD);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003557 if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003558 Opc = AArch64::MADDWrrr;
3559 RC = &AArch64::GPR32RegClass;
3560 } else {
3561 Opc = AArch64::MADDXrrr;
3562 RC = &AArch64::GPR64RegClass;
3563 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003564 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003565 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003566 case MachineCombinerPattern::MULADDW_OP2:
3567 case MachineCombinerPattern::MULADDX_OP2:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003568 // MUL I=A,B,0
3569 // ADD R,C,I
3570 // ==> MADD R,A,B,C
3571 // --- Create(MADD);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003572 if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003573 Opc = AArch64::MADDWrrr;
3574 RC = &AArch64::GPR32RegClass;
3575 } else {
3576 Opc = AArch64::MADDXrrr;
3577 RC = &AArch64::GPR64RegClass;
3578 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003579 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003580 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003581 case MachineCombinerPattern::MULADDWI_OP1:
3582 case MachineCombinerPattern::MULADDXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003583 // MUL I=A,B,0
3584 // ADD R,I,Imm
3585 // ==> ORR V, ZR, Imm
3586 // ==> MADD R,A,B,V
3587 // --- Create(MADD);
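    // Concrete shape (hypothetical registers): "mul w9, w0, w1;
    // add w2, w9, #16" becomes "orr w10, wzr, #16; madd w2, w0, w1, w10",
    // provided #16 survives processLogicalImmediate below.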
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003588 const TargetRegisterClass *OrrRC;
3589 unsigned BitSize, OrrOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003590 if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003591 OrrOpc = AArch64::ORRWri;
3592 OrrRC = &AArch64::GPR32spRegClass;
3593 BitSize = 32;
3594 ZeroReg = AArch64::WZR;
3595 Opc = AArch64::MADDWrrr;
3596 RC = &AArch64::GPR32RegClass;
3597 } else {
3598 OrrOpc = AArch64::ORRXri;
3599 OrrRC = &AArch64::GPR64spRegClass;
3600 BitSize = 64;
3601 ZeroReg = AArch64::XZR;
3602 Opc = AArch64::MADDXrrr;
3603 RC = &AArch64::GPR64RegClass;
3604 }
3605 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
3606 uint64_t Imm = Root.getOperand(2).getImm();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003607
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003608 if (Root.getOperand(3).isImm()) {
3609 unsigned Val = Root.getOperand(3).getImm();
3610 Imm = Imm << Val;
3611 }
David Majnemer1182dd82016-07-21 23:46:56 +00003612 uint64_t UImm = SignExtend64(Imm, BitSize);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003613 uint64_t Encoding;
3614 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3615 MachineInstrBuilder MIB1 =
3616 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
3617 .addReg(ZeroReg)
3618 .addImm(Encoding);
3619 InsInstrs.push_back(MIB1);
3620 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3621 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003622 }
3623 break;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003624 }
Sanjay Patel387e66e2015-11-05 19:34:57 +00003625 case MachineCombinerPattern::MULSUBW_OP1:
3626 case MachineCombinerPattern::MULSUBX_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003627 // MUL I=A,B,0
 3628 // SUB R,I,C
3629 // ==> SUB V, 0, C
3630 // ==> MADD R,A,B,V // = -C + A*B
3631 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003632 const TargetRegisterClass *SubRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003633 unsigned SubOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003634 if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003635 SubOpc = AArch64::SUBWrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003636 SubRC = &AArch64::GPR32spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003637 ZeroReg = AArch64::WZR;
3638 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003639 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003640 } else {
3641 SubOpc = AArch64::SUBXrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003642 SubRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003643 ZeroReg = AArch64::XZR;
3644 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003645 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003646 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003647 unsigned NewVR = MRI.createVirtualRegister(SubRC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003648 // SUB NewVR, 0, C
3649 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003650 BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003651 .addReg(ZeroReg)
3652 .addOperand(Root.getOperand(2));
3653 InsInstrs.push_back(MIB1);
3654 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003655 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3656 break;
3657 }
Sanjay Patel387e66e2015-11-05 19:34:57 +00003658 case MachineCombinerPattern::MULSUBW_OP2:
3659 case MachineCombinerPattern::MULSUBX_OP2:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003660 // MUL I=A,B,0
3661 // SUB R,C,I
3662 // ==> MSUB R,A,B,C (computes C - A*B)
3663 // --- Create(MSUB);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003664 if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003665 Opc = AArch64::MSUBWrrr;
3666 RC = &AArch64::GPR32RegClass;
3667 } else {
3668 Opc = AArch64::MSUBXrrr;
3669 RC = &AArch64::GPR64RegClass;
3670 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003671 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003672 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003673 case MachineCombinerPattern::MULSUBWI_OP1:
3674 case MachineCombinerPattern::MULSUBXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003675 // MUL I=A,B,0
 3676 // SUB R,I,Imm
3677 // ==> ORR V, ZR, -Imm
3678 // ==> MADD R,A,B,V // = -Imm + A*B
3679 // --- Create(MADD);
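    // Concrete shape (hypothetical registers): "mul w9, w0, w1;
    // sub w2, w9, #4" becomes "orr w10, wzr, #0xfffffffc;
    // madd w2, w0, w1, w10", i.e. w2 = -4 + w0*w1, provided -4 encodes as a
    // logical immediate below.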
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003680 const TargetRegisterClass *OrrRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003681 unsigned BitSize, OrrOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003682 if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
Juergen Ributzka25816b02014-08-30 06:16:26 +00003683 OrrOpc = AArch64::ORRWri;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003684 OrrRC = &AArch64::GPR32spRegClass;
3685 BitSize = 32;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003686 ZeroReg = AArch64::WZR;
3687 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003688 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003689 } else {
3690 OrrOpc = AArch64::ORRXri;
Juergen Ributzkaf9660f02014-11-04 22:20:07 +00003691 OrrRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003692 BitSize = 64;
3693 ZeroReg = AArch64::XZR;
3694 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003695 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003696 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003697 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
David Majnemer1182dd82016-07-21 23:46:56 +00003698 uint64_t Imm = Root.getOperand(2).getImm();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003699 if (Root.getOperand(3).isImm()) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003700 unsigned Val = Root.getOperand(3).getImm();
3701 Imm = Imm << Val;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003702 }
David Majnemer1182dd82016-07-21 23:46:56 +00003703 uint64_t UImm = SignExtend64(-Imm, BitSize);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003704 uint64_t Encoding;
3705 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3706 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003707 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003708 .addReg(ZeroReg)
3709 .addImm(Encoding);
3710 InsInstrs.push_back(MIB1);
3711 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003712 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003713 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003714 break;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003715 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003716 // Floating Point Support
3717 case MachineCombinerPattern::FMULADDS_OP1:
3718 case MachineCombinerPattern::FMULADDD_OP1:
 3719 // FMUL I=A,B
 3720 // FADD R,I,C
 3721 // ==> FMADD R,A,B,C
 3722 // --- Create(FMADD);
3723 if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
3724 Opc = AArch64::FMADDSrrr;
3725 RC = &AArch64::FPR32RegClass;
3726 } else {
3727 Opc = AArch64::FMADDDrrr;
3728 RC = &AArch64::FPR64RegClass;
3729 }
3730 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
3731 break;
3732 case MachineCombinerPattern::FMULADDS_OP2:
3733 case MachineCombinerPattern::FMULADDD_OP2:
 3734 // FMUL I=A,B
3735 // FADD R,C,I
3736 // ==> FMADD R,A,B,C
3737 // --- Create(FMADD);
3738 if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
3739 Opc = AArch64::FMADDSrrr;
3740 RC = &AArch64::FPR32RegClass;
3741 } else {
3742 Opc = AArch64::FMADDDrrr;
3743 RC = &AArch64::FPR64RegClass;
3744 }
3745 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3746 break;
3747
3748 case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
3749 Opc = AArch64::FMLAv1i32_indexed;
3750 RC = &AArch64::FPR32RegClass;
3751 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3752 FMAInstKind::Indexed);
3753 break;
3754 case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
3755 Opc = AArch64::FMLAv1i32_indexed;
3756 RC = &AArch64::FPR32RegClass;
3757 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3758 FMAInstKind::Indexed);
3759 break;
3760
3761 case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
3762 Opc = AArch64::FMLAv1i64_indexed;
3763 RC = &AArch64::FPR64RegClass;
3764 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3765 FMAInstKind::Indexed);
3766 break;
3767 case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
3768 Opc = AArch64::FMLAv1i64_indexed;
3769 RC = &AArch64::FPR64RegClass;
3770 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3771 FMAInstKind::Indexed);
3772 break;
3773
3774 case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
3775 case MachineCombinerPattern::FMLAv2f32_OP1:
3776 RC = &AArch64::FPR64RegClass;
3777 if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
3778 Opc = AArch64::FMLAv2i32_indexed;
3779 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3780 FMAInstKind::Indexed);
3781 } else {
3782 Opc = AArch64::FMLAv2f32;
3783 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3784 FMAInstKind::Accumulator);
3785 }
3786 break;
3787 case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
3788 case MachineCombinerPattern::FMLAv2f32_OP2:
3789 RC = &AArch64::FPR64RegClass;
3790 if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
3791 Opc = AArch64::FMLAv2i32_indexed;
3792 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3793 FMAInstKind::Indexed);
3794 } else {
3795 Opc = AArch64::FMLAv2f32;
3796 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3797 FMAInstKind::Accumulator);
3798 }
3799 break;
3800
3801 case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
3802 case MachineCombinerPattern::FMLAv2f64_OP1:
3803 RC = &AArch64::FPR128RegClass;
3804 if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
3805 Opc = AArch64::FMLAv2i64_indexed;
3806 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3807 FMAInstKind::Indexed);
3808 } else {
3809 Opc = AArch64::FMLAv2f64;
3810 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3811 FMAInstKind::Accumulator);
3812 }
3813 break;
3814 case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
3815 case MachineCombinerPattern::FMLAv2f64_OP2:
3816 RC = &AArch64::FPR128RegClass;
3817 if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
3818 Opc = AArch64::FMLAv2i64_indexed;
3819 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3820 FMAInstKind::Indexed);
3821 } else {
3822 Opc = AArch64::FMLAv2f64;
3823 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3824 FMAInstKind::Accumulator);
3825 }
3826 break;
3827
3828 case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
3829 case MachineCombinerPattern::FMLAv4f32_OP1:
3830 RC = &AArch64::FPR128RegClass;
3831 if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
3832 Opc = AArch64::FMLAv4i32_indexed;
3833 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3834 FMAInstKind::Indexed);
3835 } else {
3836 Opc = AArch64::FMLAv4f32;
3837 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
3838 FMAInstKind::Accumulator);
3839 }
3840 break;
3841
3842 case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
3843 case MachineCombinerPattern::FMLAv4f32_OP2:
3844 RC = &AArch64::FPR128RegClass;
3845 if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
3846 Opc = AArch64::FMLAv4i32_indexed;
3847 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3848 FMAInstKind::Indexed);
3849 } else {
3850 Opc = AArch64::FMLAv4f32;
3851 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3852 FMAInstKind::Accumulator);
3853 }
3854 break;
3855
3856 case MachineCombinerPattern::FMULSUBS_OP1:
3857 case MachineCombinerPattern::FMULSUBD_OP1: {
 3858 // FMUL I=A,B
3859 // FSUB R,I,C
3860 // ==> FNMSUB R,A,B,C // = -C + A*B
3861 // --- Create(FNMSUB);
3862 if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
3863 Opc = AArch64::FNMSUBSrrr;
3864 RC = &AArch64::FPR32RegClass;
3865 } else {
3866 Opc = AArch64::FNMSUBDrrr;
3867 RC = &AArch64::FPR64RegClass;
3868 }
3869 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
3870 break;
3871 }
3872 case MachineCombinerPattern::FMULSUBS_OP2:
3873 case MachineCombinerPattern::FMULSUBD_OP2: {
 3874 // FMUL I=A,B
3875 // FSUB R,C,I
3876 // ==> FMSUB R,A,B,C (computes C - A*B)
3877 // --- Create(FMSUB);
3878 if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
3879 Opc = AArch64::FMSUBSrrr;
3880 RC = &AArch64::FPR32RegClass;
3881 } else {
3882 Opc = AArch64::FMSUBDrrr;
3883 RC = &AArch64::FPR64RegClass;
3884 }
3885 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3886 break;
3887
3888 case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
3889 Opc = AArch64::FMLSv1i32_indexed;
3890 RC = &AArch64::FPR32RegClass;
3891 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3892 FMAInstKind::Indexed);
3893 break;
3894
3895 case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
3896 Opc = AArch64::FMLSv1i64_indexed;
3897 RC = &AArch64::FPR64RegClass;
3898 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3899 FMAInstKind::Indexed);
3900 break;
3901
3902 case MachineCombinerPattern::FMLSv2f32_OP2:
3903 case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
3904 RC = &AArch64::FPR64RegClass;
3905 if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
3906 Opc = AArch64::FMLSv2i32_indexed;
3907 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3908 FMAInstKind::Indexed);
3909 } else {
3910 Opc = AArch64::FMLSv2f32;
3911 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3912 FMAInstKind::Accumulator);
3913 }
3914 break;
3915
3916 case MachineCombinerPattern::FMLSv2f64_OP2:
3917 case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
3918 RC = &AArch64::FPR128RegClass;
3919 if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
3920 Opc = AArch64::FMLSv2i64_indexed;
3921 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3922 FMAInstKind::Indexed);
3923 } else {
3924 Opc = AArch64::FMLSv2f64;
3925 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3926 FMAInstKind::Accumulator);
3927 }
3928 break;
3929
3930 case MachineCombinerPattern::FMLSv4f32_OP2:
3931 case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
3932 RC = &AArch64::FPR128RegClass;
3933 if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
3934 Opc = AArch64::FMLSv4i32_indexed;
3935 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3936 FMAInstKind::Indexed);
3937 } else {
3938 Opc = AArch64::FMLSv4f32;
3939 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
3940 FMAInstKind::Accumulator);
3941 }
3942 break;
3943 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003944 } // end switch (Pattern)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003945 // Record MUL and ADD/SUB for deletion
3946 DelInstrs.push_back(MUL);
3947 DelInstrs.push_back(&Root);
3948
3949 return;
3950}
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003951
 3952 /// \brief Replace a csinc-branch sequence by a simple conditional branch
3953///
3954/// Examples:
3955/// 1.
3956/// csinc w9, wzr, wzr, <condition code>
3957/// tbnz w9, #0, 0x44
3958/// to
3959/// b.<inverted condition code>
3960///
3961/// 2.
3962/// csinc w9, wzr, wzr, <condition code>
3963/// tbz w9, #0, 0x44
3964/// to
3965/// b.<condition code>
3966///
Chad Rosier4aeab5f2016-03-21 13:43:58 +00003967/// Replace compare and branch sequence by TBZ/TBNZ instruction when the
 3968 /// compare's constant operand is a power of 2.
Balaram Makame9b27252016-03-10 17:54:55 +00003969///
3970/// Examples:
3971/// and w8, w8, #0x400
3972/// cbnz w8, L1
3973/// to
3974/// tbnz w8, #10, L1
3975///
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003976/// \param MI Conditional Branch
3977/// \return True when the simple conditional branch is generated
3978///
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003979bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003980 bool IsNegativeBranch = false;
3981 bool IsTestAndBranch = false;
3982 unsigned TargetBBInMI = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI.getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI.getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI.getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look through COPY instructions to find definition.
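  // Only walk past a copy if the copied vreg has a single definition and a
  // single non-debug use; anything else is not a plain rename that can be
  // looked through safely.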
  while (DefMI->isCopy()) {
    unsigned CopyVReg = DefMI->getOperand(1).getReg();
    if (!MRI->hasOneNonDBGUse(CopyVReg))
      return false;
    if (!MRI->hasOneDef(CopyVReg))
      return false;
    DefMI = MRI->getVRegDef(CopyVReg);
  }

  switch (DefMI->getOpcode()) {
  default:
    return false;
  // Fold AND into a TBZ/TBNZ if the constant operand is a power of 2.
  case AArch64::ANDWri:
  case AArch64::ANDXri: {
    if (IsTestAndBranch)
      return false;
    if (DefMI->getParent() != MBB)
      return false;
    if (!MRI->hasOneNonDBGUse(VReg))
      return false;

    bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
    uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
        DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
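    // A TB(N)Z tests exactly one bit, so the fold is only possible when the
    // decoded AND mask has a single bit set, e.g.
    //   and w8, w8, #0x400 ; cbnz w8, L1  ->  tbnz w8, #10, L1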
    if (!isPowerOf2_64(Mask))
      return false;

    MachineOperand &MO = DefMI->getOperand(1);
    unsigned NewReg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(NewReg))
      return false;

    assert(!MRI->def_empty(NewReg) && "Register must be defined.");

    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    unsigned Imm = Log2_64(Mask);
    unsigned Opc = (Imm < 32)
                       ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
                       : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
    MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
                              .addReg(NewReg)
                              .addImm(Imm)
                              .addMBB(TBB);
    // Register lives on to the TB(N)Z now.
    MO.setIsKill(false);

    // For immediates smaller than 32, we need to use the 32-bit
    // variant (W) in all cases: the 64-bit variant cannot encode them.
    // Therefore, if the input register is 64-bit, we need to take its
    // 32-bit sub-register.
    if (!Is32Bit && Imm < 32)
      NewMI->getOperand(0).setSubReg(AArch64::sub_32);
    MI.eraseFromParent();
    return true;
  }
  // Look for CSINC.
  case AArch64::CSINCWr:
  case AArch64::CSINCXr: {
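    // Only a CSINC of WZR/XZR materializes the condition code as 0 or 1;
    // with any other source operands the branch would depend on more than
    // the condition flags.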
    if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
          DefMI->getOperand(2).getReg() == AArch64::WZR) &&
        !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
          DefMI->getOperand(2).getReg() == AArch64::XZR))
      return false;

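    // Give up if the CSINC itself carries a (dead) definition of NZCV.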
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
      return false;

    AArch64CC::CondCode CC =
        (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
    // Convert only when the condition code is not modified between
    // the CSINC and the branch. The CC may be used by other
    // instructions in between.
    if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
      return false;
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    if (IsNegativeBranch)
      CC = AArch64CC::getInvertedCondCode(CC);
    BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }
  }
}

std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

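// The flag names below are the spellings the MIR printer and parser use to
// serialize these machine-operand target flags in human-readable form.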
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"}};
  return makeArrayRef(TargetFlags);
}