//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

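// A target-specific memory operand flag; as the name suggests, it marks a
// load or store whose pairing with a neighboring memory access should be
// suppressed.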
static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;

static cl::opt<unsigned>
TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
                    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// getInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI.getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  //        before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    NumBytes = 0;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow.
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested.
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes.
    NumBytes = 16;
    break;
  }

  return NumBytes;
}

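// The Cond vector built here uses a small target-specific encoding that the
// branch hooks below decode again: for Bcc, Cond[0] holds the condition code;
// for the compare-and-branch forms, Cond[0] holds a -1 marker, Cond[1] the
// branch opcode, Cond[2] the tested register, and (for TBZ/TBNZ) Cond[3] the
// bit number.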
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
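    // The unconditional branch is treated as always in range here; its real
    // displacement (a 26-bit word offset) is far larger than any of the
    // conditional forms below.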
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
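  // Branch offsets are in bytes; instructions are 4 bytes each, so the
  // encodable displacement is the byte offset divided by 4.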
  return isIntN(Bits, BrOffset / 4);
}

MachineBasicBlock *AArch64InstrInfo::getBranchDestBlock(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator left is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use add() instead of addReg() to keep the operand flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
    LLVM_FALLTHROUGH;
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
    LLVM_FALLTHROUGH;
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}


/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
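  // Isolate the low BitSize bits of Imm (a no-op when BitSize is 64).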
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  unsigned Imm;

  switch (MI.getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
            MI.getOperand(3).getImm() == 0);

  // add/sub on register with shift
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getArithShiftValue(Imm) < 4);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // logical ops on register with shift
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getShiftValue(Imm) < 4 &&
            AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);

  // It is cheap to zero out registers if the subtarget has the
  // ZeroCycleZeroing feature.
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    return Subtarget.hasZeroCycleZeroing();
  case TargetOpcode::COPY:
    return (Subtarget.hasZeroCycleZeroing() &&
            (MI.getOperand(1).getReg() == AArch64::WZR ||
             MI.getOperand(1).getReg() == AArch64::XZR));
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = MI.getOperand(2).getImm() != 0;
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in optimizeCompareInstr.
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
    return true;
  }

  return false;
}

static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible; otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

enum AccessKind {
  AK_Write = 0x01,
  AK_Read = 0x10,
  AK_All = 0x11
};

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed CC are accessed
/// on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block.
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
                      [From](MachineInstr &MI) {
                        return MI.getIterator() == From;
                      }) != To->getParent()->rend());

  // We iterate backward starting \p To until we hit \p From.
  for (--To; To != From; --To) {
    const MachineInstr &Instr = *To;

    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It is truly a compare instruction
/// only when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
///    condition code or an instruction which can be converted into such an
///    instruction. Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have an "ri" form where the immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in the
  // analyzeCompare function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}

/// Get the opcode of the S version of Instr.
/// If Instr is already an S version, its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
/// version or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr: return AArch64::ADDSWrr;
  case AArch64::ADDWri: return AArch64::ADDSWri;
  case AArch64::ADDXrr: return AArch64::ADDSXrr;
  case AArch64::ADDXri: return AArch64::ADDSXri;
  case AArch64::ADCWr: return AArch64::ADCSWr;
  case AArch64::ADCXr: return AArch64::ADCSXr;
  case AArch64::SUBWrr: return AArch64::SUBSWrr;
  case AArch64::SUBWri: return AArch64::SUBSWri;
  case AArch64::SUBXrr: return AArch64::SUBSXrr;
  case AArch64::SUBXri: return AArch64::SUBSXri;
  case AArch64::SBCWr: return AArch64::SBCSWr;
  case AArch64::SBCXr: return AArch64::SBCSXr;
  case AArch64::ANDWri: return AArch64::ANDSWri;
  case AArch64::ANDXri: return AArch64::ANDSXri;
  }
}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}

namespace {

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

} // end anonymous namespace

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64CC::Invalid;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
  }
  }
}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set or C clear
    UsedFlags.Z = true;
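    // Deliberate fall-through: HI/LS also depend on the C flag, set below.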
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set, N and V differ
    UsedFlags.Z = true;
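    // Deliberate fall-through: GT/LE also depend on the N and V flags.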
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}

static bool isADDSRegImm(unsigned Opcode) {
  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}

static bool isSUBSRegImm(unsigned Opcode) {
  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}

/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///   MI and CmpInstr,
///   or if MI opcode is not the S form there must be neither defs of flags
///   nor uses of flags between MI and CmpInstr.
/// - and, C/V flags are not used after CmpInstr
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
                                       const TargetRegisterInfo *TRI) {
  assert(MI);
  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
  assert(CmpInstr);

  const unsigned CmpOpcode = CmpInstr->getOpcode();
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

  if (MI->getParent() != CmpInstr->getParent())
    return false;

  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
    return false;

  AccessKind AccessToCheck = AK_Write;
  if (sForm(*MI) != MI->getOpcode())
    AccessToCheck = AK_All;
  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
    return false;

  UsedNZCV NZCVUsedAfterCmp;
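  // Walk forward from CmpInstr, recording which of N, Z, C, V are read before
  // the next write to NZCV (or the end of the block).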
  for (auto I = std::next(CmpInstr->getIterator()),
            E = CmpInstr->getParent()->instr_end();
       I != E; ++I) {
    const MachineInstr &Instr = *I;
    if (Instr.readsRegister(AArch64::NZCV, TRI)) {
      AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
      if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
        return false;
      NZCVUsedAfterCmp |= getUsedNZCV(CC);
    }

    if (Instr.modifiesRegister(AArch64::NZCV, TRI))
      break;
  }

  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}

/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo *MRI) const {
  assert(MRI);
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  unsigned NewOpc = sForm(*MI);
  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
    return false;

  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
    return false;

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr.eraseFromParent();
  bool succeeded = UpdateOperandRegClass(*MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

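  // Expand LOAD_STACK_GUARD: materialize the address of the stack-guard
  // global and load the guard value from it, going through the GOT, a
  // MOVZ/MOVK literal sequence (large code model), or an ADRP+LDR pair,
  // depending on how the global reference is classified.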
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI.memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI.memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

Tim Northover3b0846e2014-05-24 12:50:23 +00001331/// Return true if this is this instruction has a non-zero immediate
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001332bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
1333 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001334 default:
1335 break;
1336 case AArch64::ADDSWrs:
1337 case AArch64::ADDSXrs:
1338 case AArch64::ADDWrs:
1339 case AArch64::ADDXrs:
1340 case AArch64::ANDSWrs:
1341 case AArch64::ANDSXrs:
1342 case AArch64::ANDWrs:
1343 case AArch64::ANDXrs:
1344 case AArch64::BICSWrs:
1345 case AArch64::BICSXrs:
1346 case AArch64::BICWrs:
1347 case AArch64::BICXrs:
1348 case AArch64::CRC32Brr:
1349 case AArch64::CRC32CBrr:
1350 case AArch64::CRC32CHrr:
1351 case AArch64::CRC32CWrr:
1352 case AArch64::CRC32CXrr:
1353 case AArch64::CRC32Hrr:
1354 case AArch64::CRC32Wrr:
1355 case AArch64::CRC32Xrr:
1356 case AArch64::EONWrs:
1357 case AArch64::EONXrs:
1358 case AArch64::EORWrs:
1359 case AArch64::EORXrs:
1360 case AArch64::ORNWrs:
1361 case AArch64::ORNXrs:
1362 case AArch64::ORRWrs:
1363 case AArch64::ORRXrs:
1364 case AArch64::SUBSWrs:
1365 case AArch64::SUBSXrs:
1366 case AArch64::SUBWrs:
1367 case AArch64::SUBXrs:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001368 if (MI.getOperand(3).isImm()) {
1369 unsigned val = MI.getOperand(3).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001370 return (val != 0);
1371 }
1372 break;
1373 }
1374 return false;
1375}
1376
1377/// Return true if this is this instruction has a non-zero immediate
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001378bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
1379 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001380 default:
1381 break;
1382 case AArch64::ADDSWrx:
1383 case AArch64::ADDSXrx:
1384 case AArch64::ADDSXrx64:
1385 case AArch64::ADDWrx:
1386 case AArch64::ADDXrx:
1387 case AArch64::ADDXrx64:
1388 case AArch64::SUBSWrx:
1389 case AArch64::SUBSXrx:
1390 case AArch64::SUBSXrx64:
1391 case AArch64::SUBWrx:
1392 case AArch64::SUBXrx:
1393 case AArch64::SUBXrx64:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001394 if (MI.getOperand(3).isImm()) {
1395 unsigned val = MI.getOperand(3).getImm();
Tim Northover3b0846e2014-05-24 12:50:23 +00001396 return (val != 0);
1397 }
1398 break;
1399 }
1400
1401 return false;
1402}
1403
1404// Return true if this instruction simply sets its single destination register
1405// to zero. This is equivalent to a register rename of the zero-register.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001406bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
1407 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001408 default:
1409 break;
1410 case AArch64::MOVZWi:
1411 case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001412 if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
1413 assert(MI.getDesc().getNumOperands() == 3 &&
1414 MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
Tim Northover3b0846e2014-05-24 12:50:23 +00001415 return true;
1416 }
1417 break;
1418 case AArch64::ANDWri: // and Rd, Rzr, #imm
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001419 return MI.getOperand(1).getReg() == AArch64::WZR;
Tim Northover3b0846e2014-05-24 12:50:23 +00001420 case AArch64::ANDXri:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001421 return MI.getOperand(1).getReg() == AArch64::XZR;
Tim Northover3b0846e2014-05-24 12:50:23 +00001422 case TargetOpcode::COPY:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001423 return MI.getOperand(1).getReg() == AArch64::WZR;
Tim Northover3b0846e2014-05-24 12:50:23 +00001424 }
1425 return false;
1426}
1427
1428// Return true if this instruction simply renames a general register without
1429// modifying bits.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001430bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
1431 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001432 default:
1433 break;
1434 case TargetOpcode::COPY: {
1435 // GPR32 copies will by lowered to ORRXrs
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001436 unsigned DstReg = MI.getOperand(0).getReg();
Tim Northover3b0846e2014-05-24 12:50:23 +00001437 return (AArch64::GPR32RegClass.contains(DstReg) ||
1438 AArch64::GPR64RegClass.contains(DstReg));
1439 }
1440 case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001441 if (MI.getOperand(1).getReg() == AArch64::XZR) {
1442 assert(MI.getDesc().getNumOperands() == 4 &&
1443 MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
Tim Northover3b0846e2014-05-24 12:50:23 +00001444 return true;
1445 }
Renato Golin541d7e72014-08-01 17:27:31 +00001446 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001447 case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001448 if (MI.getOperand(2).getImm() == 0) {
1449 assert(MI.getDesc().getNumOperands() == 4 &&
1450 MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
Tim Northover3b0846e2014-05-24 12:50:23 +00001451 return true;
1452 }
Renato Golin541d7e72014-08-01 17:27:31 +00001453 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001454 }
1455 return false;
1456}
1457
1458// Return true if this instruction simply renames a general register without
1459// modifying bits.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001460bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
1461 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001462 default:
1463 break;
1464 case TargetOpcode::COPY: {
1465 // FPR64 copies will by lowered to ORR.16b
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001466 unsigned DstReg = MI.getOperand(0).getReg();
Tim Northover3b0846e2014-05-24 12:50:23 +00001467 return (AArch64::FPR64RegClass.contains(DstReg) ||
1468 AArch64::FPR128RegClass.contains(DstReg));
1469 }
1470 case AArch64::ORRv16i8:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001471 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
1472 assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001473 "invalid ORRv16i8 operands");
1474 return true;
1475 }
Renato Golin541d7e72014-08-01 17:27:31 +00001476 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001477 }
1478 return false;
1479}
1480
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001481unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
Tim Northover3b0846e2014-05-24 12:50:23 +00001482 int &FrameIndex) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001483 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001484 default:
1485 break;
1486 case AArch64::LDRWui:
1487 case AArch64::LDRXui:
1488 case AArch64::LDRBui:
1489 case AArch64::LDRHui:
1490 case AArch64::LDRSui:
1491 case AArch64::LDRDui:
1492 case AArch64::LDRQui:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001493 if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
1494 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
1495 FrameIndex = MI.getOperand(1).getIndex();
1496 return MI.getOperand(0).getReg();
Tim Northover3b0846e2014-05-24 12:50:23 +00001497 }
1498 break;
1499 }
1500
1501 return 0;
1502}
1503
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001504unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
Tim Northover3b0846e2014-05-24 12:50:23 +00001505 int &FrameIndex) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001506 switch (MI.getOpcode()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001507 default:
1508 break;
1509 case AArch64::STRWui:
1510 case AArch64::STRXui:
1511 case AArch64::STRBui:
1512 case AArch64::STRHui:
1513 case AArch64::STRSui:
1514 case AArch64::STRDui:
1515 case AArch64::STRQui:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001516 if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
1517 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
1518 FrameIndex = MI.getOperand(1).getIndex();
1519 return MI.getOperand(0).getReg();
Tim Northover3b0846e2014-05-24 12:50:23 +00001520 }
1521 break;
1522 }
1523 return 0;
1524}
1525
1526/// Return true if this is load/store scales or extends its register offset.
1527/// This refers to scaling a dynamic index as opposed to scaled immediates.
1528/// MI should be a memory op that allows scaled addressing.
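///
/// For example (a sketch): "ldr x0, [x1, w2, sxtw #3]" extends and scales its
/// index register, so this returns true; "ldr x0, [x1, x2]" (UXTX extend, no
/// shift) does not.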
bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX:

    unsigned Val = MI.getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
  return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOSuppressPair;
  });
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
  if (MI.memoperands_empty())
    return;
  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
}
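
// A sketch of how the two hooks above pair up; the predicate
// formingPairWouldHurt() is hypothetical, for illustration only:
//   if (formingPairWouldHurt(MI))
//     TII->suppressLdStPair(MI);        // sets MOSuppressPair on the MMO
//   ...
//   if (TII->isLdStPairSuppressed(MI))  // later passes observe the hint
//     return false;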

bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
  return isUnscaledLdSt(MI.getOpcode());
}

// Is this a candidate for ld/st merging or pairing? For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
  // If this is a volatile load/store, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
  if (!MI.getOperand(2).isImm())
    return false;

  // Can't merge/pair if the instruction modifies the base register.
  // e.g., ldr x0, [x0]
  unsigned BaseReg = MI.getOperand(1).getReg();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (MI.modifiesRegister(BaseReg, TRI))
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (isLdStPairSuppressed(MI))
    return false;

  // On some CPUs quad load/store pairs are slower than two single load/stores.
  if (Subtarget.avoidQuadLdStPairs()) {
    switch (MI.getOpcode()) {
    default:
      break;
    case AArch64::LDURQi:
    case AArch64::STURQi:
    case AArch64::LDRQui:
    case AArch64::STRQui:
      return false;
    }
  }

  return true;
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
    const TargetRegisterInfo *TRI) const {
  unsigned Width;
  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt.getNumExplicitOperands() == 3) {
    // Non-paired instruction (e.g., ldr x1, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
      return false;
  } else if (LdSt.getNumExplicitOperands() == 4) {
    // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
        !LdSt.getOperand(3).isImm())
      return false;
  } else
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
  unsigned Scale = 0;
  switch (LdSt.getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDPQi:
  case AArch64::LDNPQi:
  case AArch64::STPQi:
  case AArch64::STNPQi:
    Scale = 16;
    Width = 32;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    Scale = 8;
    Width = 16;
    break;
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    Scale = 4;
    Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::LDRSWui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  }

  if (LdSt.getNumExplicitOperands() == 3) {
    BaseReg = LdSt.getOperand(1).getReg();
    Offset = LdSt.getOperand(2).getImm() * Scale;
  } else {
    assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
    BaseReg = LdSt.getOperand(2).getReg();
    Offset = LdSt.getOperand(3).getImm() * Scale;
  }
  return true;
}
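
// Worked example (a sketch): for "ldp x1, x2, [x0, #16]" (LDPXi: Scale = 8,
// Width = 16) the immediate operand holds 2, so this reports BaseReg = x0 and
// Offset = 2 * 8 = 16 bytes.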

// Scale the unscaled offsets. Returns false if the unscaled offset can't be
// scaled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {
  unsigned OffsetStride = 1;
  switch (Opc) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    OffsetStride = 16;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    OffsetStride = 8;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    OffsetStride = 4;
    break;
  }
  // If the byte-offset isn't a multiple of the stride, we can't scale this
  // offset.
  if (Offset % OffsetStride != 0)
    return false;

  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  Offset /= OffsetStride;
  return true;
}
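
// For example (a sketch): with LDURXi (OffsetStride = 8), a byte offset of 16
// scales to the element offset 2, while a byte offset of 12 is not a multiple
// of the stride and makes scaleOffset return false.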

static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
  if (FirstOpc == SecondOpc)
    return true;
  // We can also pair sign-ext and zero-ext instructions.
  switch (FirstOpc) {
  default:
    return false;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
  }
  // These instructions can't be paired based on their opcodes.
  return false;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                           MachineInstr &SecondLdSt,
                                           unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;

  if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
    return false;

  // Can we pair these instructions based on their opcodes?
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  if (!canPairLdStOpc(FirstOpc, SecondOpc))
    return false;

  // Can't merge volatiles or load/stores that have a hint to avoid pair
  // formation, for example.
  if (!isCandidateToMergeOrPair(FirstLdSt) ||
      !isCandidateToMergeOrPair(SecondLdSt))
    return false;

  // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
    return false;

  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
    return false;

  // Pairwise instructions have a 7-bit signed offset field.
  if (Offset1 > 63 || Offset1 < -64)
    return false;

  // The caller should already have ordered First/SecondLdSt by offset.
  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + 1 == Offset2;
}
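
// For example (a sketch): "ldr x1, [x0, #8]" followed by "ldr x2, [x0, #16]"
// has element offsets 1 and 2, so the two loads are clustered and the load/
// store optimizer may later rewrite them as "ldp x1, x2, [x0, #8]".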

bool AArch64InstrInfo::shouldScheduleAdjacent(
    const MachineInstr &First, const MachineInstr &Second) const {
  if (Subtarget.hasArithmeticBccFusion()) {
    // Fuse CMN, CMP, TST followed by Bcc.
    unsigned SecondOpcode = Second.getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First.getOpcode()) {
      default:
        return false;
      case AArch64::ADDSWri:
      case AArch64::ADDSWrr:
      case AArch64::ADDSXri:
      case AArch64::ADDSXrr:
      case AArch64::ANDSWri:
      case AArch64::ANDSWrr:
      case AArch64::ANDSXri:
      case AArch64::ANDSXrr:
      case AArch64::SUBSWri:
      case AArch64::SUBSWrr:
      case AArch64::SUBSXri:
      case AArch64::SUBSXrr:
      case AArch64::BICSWrr:
      case AArch64::BICSXrr:
        return true;
      case AArch64::ADDSWrs:
      case AArch64::ADDSXrs:
      case AArch64::ANDSWrs:
      case AArch64::ANDSXrs:
      case AArch64::SUBSWrs:
      case AArch64::SUBSXrs:
      case AArch64::BICSWrs:
      case AArch64::BICSXrs:
        // Shift value can be 0 making these behave like the "rr" variant...
        return !hasShiftedReg(First);
      }
    }
  }
  if (Subtarget.hasArithmeticCbzFusion()) {
    // Fuse ALU operations followed by CBZ/CBNZ.
    unsigned SecondOpcode = Second.getOpcode();
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First.getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDWrr:
      case AArch64::ADDXri:
      case AArch64::ADDXrr:
      case AArch64::ANDWri:
      case AArch64::ANDWrr:
      case AArch64::ANDXri:
      case AArch64::ANDXrr:
      case AArch64::EORWri:
      case AArch64::EORWrr:
      case AArch64::EORXri:
      case AArch64::EORXrr:
      case AArch64::ORRWri:
      case AArch64::ORRWrr:
      case AArch64::ORRXri:
      case AArch64::ORRXrr:
      case AArch64::SUBWri:
      case AArch64::SUBWrr:
      case AArch64::SUBXri:
      case AArch64::SUBXrr:
        return true;
      case AArch64::ADDWrs:
      case AArch64::ADDXrs:
      case AArch64::ANDWrs:
      case AArch64::ANDXrs:
      case AArch64::SUBWrs:
      case AArch64::SUBXrs:
      case AArch64::BICWrs:
      case AArch64::BICXrs:
        // Shift value can be 0 making these behave like the "rr" variant...
        return !hasShiftedReg(First);
      }
    }
  }
  return false;
}
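
// For example (a sketch): on a core with arithmetic/Bcc fusion,
//   subs w8, w0, w1
//   b.eq <target>
// is kept adjacent by the scheduler so the pair can issue as a single fused
// macro-op.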

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, const DebugLoc &DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
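
// Worked example (a sketch): copying the register pair D1_D2 into D2_D3 gives
// (2 - 1) & 0x1f == 1, which is < 2, so a forward sub-register copy would
// overwrite D2 before it is read as a source; copyPhysRegTuple below therefore
// walks the sub-registers in reverse.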

void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
         "SP increment/decrement not 16-byte aligned");

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
2593
Keno Fischere70b31f2015-06-08 20:09:58 +00002594MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002595 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
Jonas Paulsson8e5b0c62016-05-10 08:09:37 +00002596 MachineBasicBlock::iterator InsertPt, int FrameIndex,
2597 LiveIntervals *LIS) const {
Tim Northover3b0846e2014-05-24 12:50:23 +00002598 // This is a bit of a hack. Consider this instruction:
2599 //
2600 // %vreg0<def> = COPY %SP; GPR64all:%vreg0
2601 //
2602 // We explicitly chose GPR64all for the virtual register so such a copy might
2603 // be eliminated by RegisterCoalescer. However, that may not be possible, and
2604 // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
2605 // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
2606 //
2607 // To prevent that, we are going to constrain the %vreg0 register class here.
2608 //
2609 // <rdar://problem/11522048>
2610 //
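  // (Constraining to GPR64 makes the virtual register spillable on its own,
  // so no fold is attempted and nullptr is returned below.)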
Geoff Berryd46b6e82017-01-05 21:51:42 +00002611 if (MI.isFullCopy()) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002612 unsigned DstReg = MI.getOperand(0).getReg();
2613 unsigned SrcReg = MI.getOperand(1).getReg();
Tim Northover3b0846e2014-05-24 12:50:23 +00002614 if (SrcReg == AArch64::SP &&
2615 TargetRegisterInfo::isVirtualRegister(DstReg)) {
2616 MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
2617 return nullptr;
2618 }
2619 if (DstReg == AArch64::SP &&
2620 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
2621 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2622 return nullptr;
2623 }
2624 }
2625
Geoff Berryd46b6e82017-01-05 21:51:42 +00002626 // Handle the case where a copy is being spilled or filled but the source
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002627 // and destination register classes don't match. For example:
Geoff Berry7c078fc2016-11-29 18:28:32 +00002628 //
2629 // %vreg0<def> = COPY %XZR; GPR64common:%vreg0
2630 //
2631 // In this case we can still safely fold away the COPY and generate the
2632 // following spill code:
2633 //
2634 // STRXui %XZR, <fi#0>
2635 //
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002636 // This also eliminates spilled cross register class COPYs (e.g. between x and
2637 // d regs) of the same size. For example:
2638 //
2639 // %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
2640 //
Geoff Berryd46b6e82017-01-05 21:51:42 +00002641 // will be filled as
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002642 //
2643 // LDRDui %vreg0, fi<#0>
2644 //
2645 // instead of
2646 //
2647 // LDRXui %vregTemp, fi<#0>
2648 // %vreg0 = FMOV %vregTemp
2649 //
Geoff Berryd46b6e82017-01-05 21:51:42 +00002650 if (MI.isCopy() && Ops.size() == 1 &&
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002651 // Make sure we're only folding the explicit COPY defs/uses.
2652 (Ops[0] == 0 || Ops[0] == 1)) {
Geoff Berryd46b6e82017-01-05 21:51:42 +00002653 bool IsSpill = Ops[0] == 0;
2654 bool IsFill = !IsSpill;
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002655 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
2656 const MachineRegisterInfo &MRI = MF.getRegInfo();
Geoff Berry7c078fc2016-11-29 18:28:32 +00002657 MachineBasicBlock &MBB = *MI.getParent();
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002658 const MachineOperand &DstMO = MI.getOperand(0);
Geoff Berry7c078fc2016-11-29 18:28:32 +00002659 const MachineOperand &SrcMO = MI.getOperand(1);
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002660 unsigned DstReg = DstMO.getReg();
Geoff Berry7c078fc2016-11-29 18:28:32 +00002661 unsigned SrcReg = SrcMO.getReg();
Geoff Berryd46b6e82017-01-05 21:51:42 +00002662 // This is slightly expensive to compute for physical regs since
2663 // getMinimalPhysRegClass is slow.
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002664 auto getRegClass = [&](unsigned Reg) {
2665 return TargetRegisterInfo::isVirtualRegister(Reg)
2666 ? MRI.getRegClass(Reg)
2667 : TRI.getMinimalPhysRegClass(Reg);
2668 };
Geoff Berryd46b6e82017-01-05 21:51:42 +00002669
2670 if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
2671 assert(getRegClass(DstReg)->getSize() == getRegClass(SrcReg)->getSize() &&
2672 "Mismatched register size in non subreg COPY");
2673 if (IsSpill)
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002674 storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
Geoff Berryd46b6e82017-01-05 21:51:42 +00002675 getRegClass(SrcReg), &TRI);
Geoff Berry7ffce7b2016-12-01 23:43:55 +00002676 else
Geoff Berryd46b6e82017-01-05 21:51:42 +00002677 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
2678 getRegClass(DstReg), &TRI);
Geoff Berry7c078fc2016-11-29 18:28:32 +00002679 return &*--InsertPt;
2680 }
Geoff Berryd46b6e82017-01-05 21:51:42 +00002681
2682 // Handle cases like spilling def of:
2683 //
2684 // %vreg0:sub_32<def,read-undef> = COPY %WZR; GPR64common:%vreg0
2685 //
2686 // where the physical register source can be widened and stored to the full
2687 // virtual reg destination stack slot, in this case producing:
2688 //
2689 // STRXui %XZR, <fi#0>
2690 //
2691 if (IsSpill && DstMO.isUndef() &&
2692 TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
2693 assert(SrcMO.getSubReg() == 0 &&
2694 "Unexpected subreg on physical register");
2695 const TargetRegisterClass *SpillRC;
2696 unsigned SpillSubreg;
2697 switch (DstMO.getSubReg()) {
2698 default:
2699 SpillRC = nullptr;
2700 break;
2701 case AArch64::sub_32:
2702 case AArch64::ssub:
2703 if (AArch64::GPR32RegClass.contains(SrcReg)) {
2704 SpillRC = &AArch64::GPR64RegClass;
2705 SpillSubreg = AArch64::sub_32;
2706 } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
2707 SpillRC = &AArch64::FPR64RegClass;
2708 SpillSubreg = AArch64::ssub;
2709 } else
2710 SpillRC = nullptr;
2711 break;
2712 case AArch64::dsub:
2713 if (AArch64::FPR64RegClass.contains(SrcReg)) {
2714 SpillRC = &AArch64::FPR128RegClass;
2715 SpillSubreg = AArch64::dsub;
2716 } else
2717 SpillRC = nullptr;
2718 break;
2719 }
2720
2721 if (SpillRC)
2722 if (unsigned WidenedSrcReg =
2723 TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
2724 storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
2725 FrameIndex, SpillRC, &TRI);
2726 return &*--InsertPt;
2727 }
2728 }
2729
2730 // Handle cases like filling use of:
2731 //
2732 // %vreg0:sub_32<def,read-undef> = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1
2733 //
 2734 // where we can load the full virtual reg source stack slot into the subreg
2735 // destination, in this case producing:
2736 //
2737 // LDRWui %vreg0:sub_32<def,read-undef>, <fi#0>
2738 //
2739 if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
2740 const TargetRegisterClass *FillRC;
2741 switch (DstMO.getSubReg()) {
2742 default:
2743 FillRC = nullptr;
2744 break;
2745 case AArch64::sub_32:
2746 FillRC = &AArch64::GPR32RegClass;
2747 break;
2748 case AArch64::ssub:
2749 FillRC = &AArch64::FPR32RegClass;
2750 break;
2751 case AArch64::dsub:
2752 FillRC = &AArch64::FPR64RegClass;
2753 break;
2754 }
2755
2756 if (FillRC) {
2757 assert(getRegClass(SrcReg)->getSize() == FillRC->getSize() &&
2758 "Mismatched regclass size on folded subreg COPY");
2759 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
2760 MachineInstr &LoadMI = *--InsertPt;
2761 MachineOperand &LoadDst = LoadMI.getOperand(0);
2762 assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
2763 LoadDst.setSubReg(DstMO.getSubReg());
2764 LoadDst.setIsUndef();
2765 return &LoadMI;
2766 }
2767 }
Geoff Berry7c078fc2016-11-29 18:28:32 +00002768 }
2769
Tim Northover3b0846e2014-05-24 12:50:23 +00002770 // Cannot fold.
2771 return nullptr;
2772}
2773
2774int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
2775 bool *OutUseUnscaledOp,
2776 unsigned *OutUnscaledOp,
2777 int *EmittableOffset) {
2778 int Scale = 1;
2779 bool IsSigned = false;
2780 // The ImmIdx should be changed case by case if it is not 2.
2781 unsigned ImmIdx = 2;
2782 unsigned UnscaledOp = 0;
2783 // Set output values in case of early exit.
2784 if (EmittableOffset)
2785 *EmittableOffset = 0;
2786 if (OutUseUnscaledOp)
2787 *OutUseUnscaledOp = false;
2788 if (OutUnscaledOp)
2789 *OutUnscaledOp = 0;
2790 switch (MI.getOpcode()) {
2791 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002792 llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
Tim Northover3b0846e2014-05-24 12:50:23 +00002793 // Vector spills/fills can't take an immediate offset.
2794 case AArch64::LD1Twov2d:
2795 case AArch64::LD1Threev2d:
2796 case AArch64::LD1Fourv2d:
2797 case AArch64::LD1Twov1d:
2798 case AArch64::LD1Threev1d:
2799 case AArch64::LD1Fourv1d:
2800 case AArch64::ST1Twov2d:
2801 case AArch64::ST1Threev2d:
2802 case AArch64::ST1Fourv2d:
2803 case AArch64::ST1Twov1d:
2804 case AArch64::ST1Threev1d:
2805 case AArch64::ST1Fourv1d:
2806 return AArch64FrameOffsetCannotUpdate;
2807 case AArch64::PRFMui:
2808 Scale = 8;
2809 UnscaledOp = AArch64::PRFUMi;
2810 break;
2811 case AArch64::LDRXui:
2812 Scale = 8;
2813 UnscaledOp = AArch64::LDURXi;
2814 break;
2815 case AArch64::LDRWui:
2816 Scale = 4;
2817 UnscaledOp = AArch64::LDURWi;
2818 break;
2819 case AArch64::LDRBui:
2820 Scale = 1;
2821 UnscaledOp = AArch64::LDURBi;
2822 break;
2823 case AArch64::LDRHui:
2824 Scale = 2;
2825 UnscaledOp = AArch64::LDURHi;
2826 break;
2827 case AArch64::LDRSui:
2828 Scale = 4;
2829 UnscaledOp = AArch64::LDURSi;
2830 break;
2831 case AArch64::LDRDui:
2832 Scale = 8;
2833 UnscaledOp = AArch64::LDURDi;
2834 break;
2835 case AArch64::LDRQui:
2836 Scale = 16;
2837 UnscaledOp = AArch64::LDURQi;
2838 break;
2839 case AArch64::LDRBBui:
2840 Scale = 1;
2841 UnscaledOp = AArch64::LDURBBi;
2842 break;
2843 case AArch64::LDRHHui:
2844 Scale = 2;
2845 UnscaledOp = AArch64::LDURHHi;
2846 break;
2847 case AArch64::LDRSBXui:
2848 Scale = 1;
2849 UnscaledOp = AArch64::LDURSBXi;
2850 break;
2851 case AArch64::LDRSBWui:
2852 Scale = 1;
2853 UnscaledOp = AArch64::LDURSBWi;
2854 break;
2855 case AArch64::LDRSHXui:
2856 Scale = 2;
2857 UnscaledOp = AArch64::LDURSHXi;
2858 break;
2859 case AArch64::LDRSHWui:
2860 Scale = 2;
2861 UnscaledOp = AArch64::LDURSHWi;
2862 break;
2863 case AArch64::LDRSWui:
2864 Scale = 4;
2865 UnscaledOp = AArch64::LDURSWi;
2866 break;
2867
2868 case AArch64::STRXui:
2869 Scale = 8;
2870 UnscaledOp = AArch64::STURXi;
2871 break;
2872 case AArch64::STRWui:
2873 Scale = 4;
2874 UnscaledOp = AArch64::STURWi;
2875 break;
2876 case AArch64::STRBui:
2877 Scale = 1;
2878 UnscaledOp = AArch64::STURBi;
2879 break;
2880 case AArch64::STRHui:
2881 Scale = 2;
2882 UnscaledOp = AArch64::STURHi;
2883 break;
2884 case AArch64::STRSui:
2885 Scale = 4;
2886 UnscaledOp = AArch64::STURSi;
2887 break;
2888 case AArch64::STRDui:
2889 Scale = 8;
2890 UnscaledOp = AArch64::STURDi;
2891 break;
2892 case AArch64::STRQui:
2893 Scale = 16;
2894 UnscaledOp = AArch64::STURQi;
2895 break;
2896 case AArch64::STRBBui:
2897 Scale = 1;
2898 UnscaledOp = AArch64::STURBBi;
2899 break;
2900 case AArch64::STRHHui:
2901 Scale = 2;
2902 UnscaledOp = AArch64::STURHHi;
2903 break;
2904
2905 case AArch64::LDPXi:
2906 case AArch64::LDPDi:
2907 case AArch64::STPXi:
2908 case AArch64::STPDi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002909 case AArch64::LDNPXi:
2910 case AArch64::LDNPDi:
2911 case AArch64::STNPXi:
2912 case AArch64::STNPDi:
2913 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002914 IsSigned = true;
2915 Scale = 8;
2916 break;
2917 case AArch64::LDPQi:
2918 case AArch64::STPQi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002919 case AArch64::LDNPQi:
2920 case AArch64::STNPQi:
2921 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002922 IsSigned = true;
2923 Scale = 16;
2924 break;
2925 case AArch64::LDPWi:
2926 case AArch64::LDPSi:
2927 case AArch64::STPWi:
2928 case AArch64::STPSi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002929 case AArch64::LDNPWi:
2930 case AArch64::LDNPSi:
2931 case AArch64::STNPWi:
2932 case AArch64::STNPSi:
2933 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002934 IsSigned = true;
2935 Scale = 4;
2936 break;
2937
2938 case AArch64::LDURXi:
2939 case AArch64::LDURWi:
2940 case AArch64::LDURBi:
2941 case AArch64::LDURHi:
2942 case AArch64::LDURSi:
2943 case AArch64::LDURDi:
2944 case AArch64::LDURQi:
2945 case AArch64::LDURHHi:
2946 case AArch64::LDURBBi:
2947 case AArch64::LDURSBXi:
2948 case AArch64::LDURSBWi:
2949 case AArch64::LDURSHXi:
2950 case AArch64::LDURSHWi:
2951 case AArch64::LDURSWi:
2952 case AArch64::STURXi:
2953 case AArch64::STURWi:
2954 case AArch64::STURBi:
2955 case AArch64::STURHi:
2956 case AArch64::STURSi:
2957 case AArch64::STURDi:
2958 case AArch64::STURQi:
2959 case AArch64::STURBBi:
2960 case AArch64::STURHHi:
2961 Scale = 1;
2962 break;
2963 }
2964
2965 Offset += MI.getOperand(ImmIdx).getImm() * Scale;
2966
2967 bool useUnscaledOp = false;
2968 // If the offset doesn't match the scale, we rewrite the instruction to
2969 // use the unscaled instruction instead. Likewise, if we have a negative
2970 // offset (and have an unscaled op to use).
2971 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
2972 useUnscaledOp = true;
2973
2974 // Use an unscaled addressing mode if the instruction has a negative offset
2975 // (or if the instruction is already using an unscaled addressing mode).
2976 unsigned MaskBits;
2977 if (IsSigned) {
2978 // ldp/stp instructions.
2979 MaskBits = 7;
2980 Offset /= Scale;
2981 } else if (UnscaledOp == 0 || useUnscaledOp) {
2982 MaskBits = 9;
2983 IsSigned = true;
2984 Scale = 1;
2985 } else {
2986 MaskBits = 12;
2987 IsSigned = false;
2988 Offset /= Scale;
2989 }
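  // Worked example (illustrative): LDRXui scales by 8, so a byte offset of 24
  // is encoded as imm = 3 in a 12-bit unsigned field. A byte offset of 20 is
  // not a multiple of 8, so useUnscaledOp selects LDURXi, which takes the raw
  // byte offset in a 9-bit signed field (range [-256, 255]).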
2990
2991 // Attempt to fold address computation.
2992 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
2993 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
2994 if (Offset >= MinOff && Offset <= MaxOff) {
2995 if (EmittableOffset)
2996 *EmittableOffset = Offset;
2997 Offset = 0;
2998 } else {
2999 int NewOff = Offset < 0 ? MinOff : MaxOff;
3000 if (EmittableOffset)
3001 *EmittableOffset = NewOff;
3002 Offset = (Offset - NewOff) * Scale;
3003 }
3004 if (OutUseUnscaledOp)
3005 *OutUseUnscaledOp = useUnscaledOp;
3006 if (OutUnscaledOp)
3007 *OutUnscaledOp = UnscaledOp;
3008 return AArch64FrameOffsetCanUpdate |
3009 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
3010}
3011
3012bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
3013 unsigned FrameReg, int &Offset,
3014 const AArch64InstrInfo *TII) {
3015 unsigned Opcode = MI.getOpcode();
3016 unsigned ImmIdx = FrameRegIdx + 1;
3017
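  // ADDXri/ADDSXri can absorb the entire frame offset: fold the existing
  // immediate into Offset and let emitFrameOffset() split it into legal
  // 12-bit, optionally shifted, chunks.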
3018 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
3019 Offset += MI.getOperand(ImmIdx).getImm();
3020 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
3021 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
3022 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
3023 MI.eraseFromParent();
3024 Offset = 0;
3025 return true;
3026 }
3027
3028 int NewOffset;
3029 unsigned UnscaledOp;
3030 bool UseUnscaledOp;
3031 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
3032 &UnscaledOp, &NewOffset);
3033 if (Status & AArch64FrameOffsetCanUpdate) {
3034 if (Status & AArch64FrameOffsetIsLegal)
3035 // Replace the FrameIndex with FrameReg.
3036 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
3037 if (UseUnscaledOp)
3038 MI.setDesc(TII->get(UnscaledOp));
3039
3040 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
3041 return Offset == 0;
3042 }
3043
3044 return false;
3045}
3046
3047void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
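  // HINT #0 is the architectural encoding of NOP on AArch64.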
3048 NopInst.setOpcode(AArch64::HINT);
Jim Grosbache9119e42015-05-13 18:37:00 +00003049 NopInst.addOperand(MCOperand::createImm(0));
Tim Northover3b0846e2014-05-24 12:50:23 +00003050}
Chad Rosier9d1a5562016-05-02 14:56:21 +00003051
3052// AArch64 supports MachineCombiner.
Benjamin Kramer8c90fd72014-09-03 11:41:21 +00003053bool AArch64InstrInfo::useMachineCombiner() const {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003055 return true;
3056}
Eugene Zelenko049b0172017-01-06 00:30:53 +00003057
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003058// True when Opc sets the NZCV flags
3059static bool isCombineInstrSettingFlag(unsigned Opc) {
3060 switch (Opc) {
3061 case AArch64::ADDSWrr:
3062 case AArch64::ADDSWri:
3063 case AArch64::ADDSXrr:
3064 case AArch64::ADDSXri:
3065 case AArch64::SUBSWrr:
3066 case AArch64::SUBSXrr:
3067 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3068 case AArch64::SUBSWri:
3069 case AArch64::SUBSXri:
3070 return true;
3071 default:
3072 break;
3073 }
3074 return false;
3075}
Eugene Zelenko049b0172017-01-06 00:30:53 +00003076
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003077// 32b Opcodes that can be combined with a MUL
3078static bool isCombineInstrCandidate32(unsigned Opc) {
3079 switch (Opc) {
3080 case AArch64::ADDWrr:
3081 case AArch64::ADDWri:
3082 case AArch64::SUBWrr:
3083 case AArch64::ADDSWrr:
3084 case AArch64::ADDSWri:
3085 case AArch64::SUBSWrr:
3086 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3087 case AArch64::SUBWri:
3088 case AArch64::SUBSWri:
3089 return true;
3090 default:
3091 break;
3092 }
3093 return false;
3094}
Eugene Zelenko049b0172017-01-06 00:30:53 +00003095
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003096// 64b Opcodes that can be combined with a MUL
3097static bool isCombineInstrCandidate64(unsigned Opc) {
3098 switch (Opc) {
3099 case AArch64::ADDXrr:
3100 case AArch64::ADDXri:
3101 case AArch64::SUBXrr:
3102 case AArch64::ADDSXrr:
3103 case AArch64::ADDSXri:
3104 case AArch64::SUBSXrr:
3105 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3106 case AArch64::SUBXri:
3107 case AArch64::SUBSXri:
3108 return true;
3109 default:
3110 break;
3111 }
3112 return false;
3113}
Eugene Zelenko049b0172017-01-06 00:30:53 +00003114
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003115// FP Opcodes that can be combined with a FMUL
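// Fusing an FMUL with an FADD/FSUB changes the intermediate rounding step, so
// it is only done when unsafe FP math or fast FP-op fusion is enabled (checked
// below).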
3116static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
3117 switch (Inst.getOpcode()) {
Evandro Menezes19b2aed2016-09-15 19:55:23 +00003118 default:
3119 break;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003120 case AArch64::FADDSrr:
3121 case AArch64::FADDDrr:
3122 case AArch64::FADDv2f32:
3123 case AArch64::FADDv2f64:
3124 case AArch64::FADDv4f32:
3125 case AArch64::FSUBSrr:
3126 case AArch64::FSUBDrr:
3127 case AArch64::FSUBv2f32:
3128 case AArch64::FSUBv2f64:
3129 case AArch64::FSUBv4f32:
Logan Chience542ee2017-01-05 23:41:33 +00003130 TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
3131 return (Options.UnsafeFPMath ||
3132 Options.AllowFPOpFusion == FPOpFusion::Fast);
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003133 }
3134 return false;
3135}
Eugene Zelenko049b0172017-01-06 00:30:53 +00003136
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003137// Opcodes that can be combined with a MUL
3138static bool isCombineInstrCandidate(unsigned Opc) {
3139 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
3140}
3141
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003142//
3143// Utility routine that checks if \param MO is defined by an
3144// \param CombineOpc instruction in the basic block \param MBB
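// When \param CheckZeroReg is set, additionally require that the defining
// instruction's third register operand is \param ZeroReg, i.e. that the
// multiply has a zero accumulator.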
3145static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
3146 unsigned CombineOpc, unsigned ZeroReg = 0,
3147 bool CheckZeroReg = false) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003148 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3149 MachineInstr *MI = nullptr;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003150
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003151 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3152 MI = MRI.getUniqueVRegDef(MO.getReg());
3153 // And it needs to be in the trace (otherwise, it won't have a depth).
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003154 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003155 return false;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003156 // Must only be used by the user we combine with.
Gerolf Hoflehnerfe2c11f2014-08-13 22:07:36 +00003157 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003158 return false;
3159
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003160 if (CheckZeroReg) {
3161 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
3162 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
 3163 MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
3164 // The third input reg must be zero.
3165 if (MI->getOperand(3).getReg() != ZeroReg)
3166 return false;
3167 }
3168
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003169 return true;
3170}
3171
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003172//
3173// Is \param MO defined by an integer multiply and can be combined?
3174static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3175 unsigned MulOpc, unsigned ZeroReg) {
3176 return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3177}
3178
3179//
 3180// Is \param MO defined by a floating-point multiply, and can it be combined?
3181static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3182 unsigned MulOpc) {
3183 return canCombine(MBB, MO, MulOpc);
3184}
3185
Haicheng Wu08b94622016-01-07 04:01:02 +00003186// TODO: There are many more machine instruction opcodes to match:
3187// 1. Other data types (integer, vectors)
3188// 2. Other math / logic operations (xor, or)
3189// 3. Other forms of the same operation (intrinsics and other variants)
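// Note: FP reassociation can change rounding, which is why the FP cases below
// are gated on UnsafeFPMath.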
3190bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
3191 switch (Inst.getOpcode()) {
3192 case AArch64::FADDDrr:
3193 case AArch64::FADDSrr:
3194 case AArch64::FADDv2f32:
3195 case AArch64::FADDv2f64:
3196 case AArch64::FADDv4f32:
3197 case AArch64::FMULDrr:
3198 case AArch64::FMULSrr:
3199 case AArch64::FMULX32:
3200 case AArch64::FMULX64:
3201 case AArch64::FMULXv2f32:
3202 case AArch64::FMULXv2f64:
3203 case AArch64::FMULXv4f32:
3204 case AArch64::FMULv2f32:
3205 case AArch64::FMULv2f64:
3206 case AArch64::FMULv4f32:
3207 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
3208 default:
3209 return false;
3210 }
3211}
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003212
Haicheng Wu08b94622016-01-07 04:01:02 +00003213/// Find instructions that can be turned into madd.
3214static bool getMaddPatterns(MachineInstr &Root,
3215 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003216 unsigned Opc = Root.getOpcode();
3217 MachineBasicBlock &MBB = *Root.getParent();
3218 bool Found = false;
3219
3220 if (!isCombineInstrCandidate(Opc))
Chad Rosier85c85942016-03-23 20:07:28 +00003221 return false;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003222 if (isCombineInstrSettingFlag(Opc)) {
3223 int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
 3224 // When NZCV is live, bail out.
3225 if (Cmp_NZCV == -1)
Chad Rosier85c85942016-03-23 20:07:28 +00003226 return false;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003227 unsigned NewOpc = convertFlagSettingOpcode(Root);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003228 // When the opcode can't change, bail out.
3229 // CHECKME: do we miss any cases for opcode conversion?
3230 if (NewOpc == Opc)
Chad Rosier85c85942016-03-23 20:07:28 +00003231 return false;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003232 Opc = NewOpc;
3233 }
3234
3235 switch (Opc) {
3236 default:
3237 break;
3238 case AArch64::ADDWrr:
3239 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3240 "ADDWrr does not have register operands");
3241 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3242 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003243 Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003244 Found = true;
3245 }
3246 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3247 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003248 Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003249 Found = true;
3250 }
3251 break;
3252 case AArch64::ADDXrr:
3253 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3254 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003255 Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003256 Found = true;
3257 }
3258 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3259 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003260 Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003261 Found = true;
3262 }
3263 break;
3264 case AArch64::SUBWrr:
3265 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3266 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003267 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003268 Found = true;
3269 }
3270 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3271 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003272 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003273 Found = true;
3274 }
3275 break;
3276 case AArch64::SUBXrr:
3277 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3278 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003279 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003280 Found = true;
3281 }
3282 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3283 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003284 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003285 Found = true;
3286 }
3287 break;
3288 case AArch64::ADDWri:
3289 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3290 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003291 Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003292 Found = true;
3293 }
3294 break;
3295 case AArch64::ADDXri:
3296 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3297 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003298 Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003299 Found = true;
3300 }
3301 break;
3302 case AArch64::SUBWri:
3303 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3304 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003305 Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003306 Found = true;
3307 }
3308 break;
3309 case AArch64::SUBXri:
3310 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3311 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00003312 Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003313 Found = true;
3314 }
3315 break;
3316 }
3317 return Found;
3318}
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003319/// Floating-Point Support
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003320
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003321/// Find instructions that can be turned into an fmadd or fmla.
3322static bool getFMAPatterns(MachineInstr &Root,
3323 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3325 if (!isCombineInstrCandidateFP(Root))
Eugene Zelenko049b0172017-01-06 00:30:53 +00003326 return false;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003327
3328 MachineBasicBlock &MBB = *Root.getParent();
3329 bool Found = false;
3330
3331 switch (Root.getOpcode()) {
3332 default:
3333 assert(false && "Unsupported FP instruction in combiner\n");
3334 break;
3335 case AArch64::FADDSrr:
3336 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
 3337 "FADDSrr does not have register operands");
3338 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3339 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
3340 Found = true;
3341 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3342 AArch64::FMULv1i32_indexed)) {
3343 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
3344 Found = true;
3345 }
3346 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3347 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
3348 Found = true;
3349 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3350 AArch64::FMULv1i32_indexed)) {
3351 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
3352 Found = true;
3353 }
3354 break;
3355 case AArch64::FADDDrr:
3356 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3357 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
3358 Found = true;
3359 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3360 AArch64::FMULv1i64_indexed)) {
3361 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
3362 Found = true;
3363 }
3364 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3365 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
3366 Found = true;
3367 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3368 AArch64::FMULv1i64_indexed)) {
3369 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
3370 Found = true;
3371 }
3372 break;
3373 case AArch64::FADDv2f32:
3374 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3375 AArch64::FMULv2i32_indexed)) {
3376 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
3377 Found = true;
3378 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3379 AArch64::FMULv2f32)) {
3380 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
3381 Found = true;
3382 }
3383 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3384 AArch64::FMULv2i32_indexed)) {
3385 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
3386 Found = true;
3387 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3388 AArch64::FMULv2f32)) {
3389 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
3390 Found = true;
3391 }
3392 break;
3393 case AArch64::FADDv2f64:
3394 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3395 AArch64::FMULv2i64_indexed)) {
3396 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
3397 Found = true;
3398 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3399 AArch64::FMULv2f64)) {
3400 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
3401 Found = true;
3402 }
3403 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3404 AArch64::FMULv2i64_indexed)) {
3405 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
3406 Found = true;
3407 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3408 AArch64::FMULv2f64)) {
3409 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
3410 Found = true;
3411 }
3412 break;
3413 case AArch64::FADDv4f32:
3414 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3415 AArch64::FMULv4i32_indexed)) {
3416 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
3417 Found = true;
3418 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3419 AArch64::FMULv4f32)) {
3420 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
3421 Found = true;
3422 }
3423 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3424 AArch64::FMULv4i32_indexed)) {
3425 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
3426 Found = true;
3427 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3428 AArch64::FMULv4f32)) {
3429 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
3430 Found = true;
3431 }
3432 break;
3433
3434 case AArch64::FSUBSrr:
3435 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3436 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
3437 Found = true;
3438 }
3439 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3440 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
3441 Found = true;
3442 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3443 AArch64::FMULv1i32_indexed)) {
3444 Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
3445 Found = true;
3446 }
3447 break;
3448 case AArch64::FSUBDrr:
3449 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3450 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
3451 Found = true;
3452 }
3453 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3454 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
3455 Found = true;
3456 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3457 AArch64::FMULv1i64_indexed)) {
3458 Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
3459 Found = true;
3460 }
3461 break;
3462 case AArch64::FSUBv2f32:
3463 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3464 AArch64::FMULv2i32_indexed)) {
3465 Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
3466 Found = true;
3467 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3468 AArch64::FMULv2f32)) {
3469 Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
3470 Found = true;
3471 }
3472 break;
3473 case AArch64::FSUBv2f64:
3474 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3475 AArch64::FMULv2i64_indexed)) {
3476 Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
3477 Found = true;
3478 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3479 AArch64::FMULv2f64)) {
3480 Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
3481 Found = true;
3482 }
3483 break;
3484 case AArch64::FSUBv4f32:
3485 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3486 AArch64::FMULv4i32_indexed)) {
3487 Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
3488 Found = true;
3489 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3490 AArch64::FMULv4f32)) {
3491 Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
3492 Found = true;
3493 }
3494 break;
3495 }
3496 return Found;
3497}
3498
3499/// Return true when a code sequence can improve throughput. It
3500/// should be called only for instructions in loops.
3501/// \param Pattern - combiner pattern
3502bool
3503AArch64InstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
3504 switch (Pattern) {
3505 default:
3506 break;
3507 case MachineCombinerPattern::FMULADDS_OP1:
3508 case MachineCombinerPattern::FMULADDS_OP2:
3509 case MachineCombinerPattern::FMULSUBS_OP1:
3510 case MachineCombinerPattern::FMULSUBS_OP2:
3511 case MachineCombinerPattern::FMULADDD_OP1:
3512 case MachineCombinerPattern::FMULADDD_OP2:
3513 case MachineCombinerPattern::FMULSUBD_OP1:
3514 case MachineCombinerPattern::FMULSUBD_OP2:
3515 case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
3516 case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
3517 case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
3518 case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
3519 case MachineCombinerPattern::FMLAv2f32_OP2:
3520 case MachineCombinerPattern::FMLAv2f32_OP1:
3521 case MachineCombinerPattern::FMLAv2f64_OP1:
3522 case MachineCombinerPattern::FMLAv2f64_OP2:
3523 case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
3524 case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
3525 case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
3526 case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
3527 case MachineCombinerPattern::FMLAv4f32_OP1:
3528 case MachineCombinerPattern::FMLAv4f32_OP2:
3529 case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
3530 case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
3531 case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
3532 case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
3533 case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
3534 case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
3535 case MachineCombinerPattern::FMLSv2f32_OP2:
3536 case MachineCombinerPattern::FMLSv2f64_OP2:
3537 case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
3538 case MachineCombinerPattern::FMLSv4f32_OP2:
3539 return true;
3540 } // end switch (Pattern)
3541 return false;
3542}
Haicheng Wu08b94622016-01-07 04:01:02 +00003543/// Return true when there is potentially a faster code sequence for an
3544/// instruction chain ending in \p Root. All potential patterns are listed in
 3545/// the \p Patterns vector. Patterns should be sorted in priority order since the
3546/// pattern evaluator stops checking as soon as it finds a faster sequence.
3548bool AArch64InstrInfo::getMachineCombinerPatterns(
3549 MachineInstr &Root,
3550 SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003551 // Integer patterns
Haicheng Wu08b94622016-01-07 04:01:02 +00003552 if (getMaddPatterns(Root, Patterns))
3553 return true;
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003554 // Floating point patterns
3555 if (getFMAPatterns(Root, Patterns))
3556 return true;
Haicheng Wu08b94622016-01-07 04:01:02 +00003557
3558 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
3559}
3560
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003561enum class FMAInstKind { Default, Indexed, Accumulator };
3562/// genFusedMultiply - Generate fused multiply instructions.
3563/// This function supports both integer and floating point instructions.
3564/// A typical example:
3565/// F|MUL I=A,B,0
3566/// F|ADD R,I,C
3567/// ==> F|MADD R,A,B,C
3568/// \param Root is the F|ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00003569/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003570/// contain the generated madd instruction
3571/// \param IdxMulOpd is index of operand in Root that is the result of
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003572/// the F|MUL. In the example above IdxMulOpd is 1.
3573/// \param MaddOpc the opcode fo the f|madd instruction
3574static MachineInstr *
3575genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
3576 const TargetInstrInfo *TII, MachineInstr &Root,
3577 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
3578 unsigned MaddOpc, const TargetRegisterClass *RC,
3579 FMAInstKind kind = FMAInstKind::Default) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003580 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3581
3582 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
3583 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003584 unsigned ResultReg = Root.getOperand(0).getReg();
3585 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3586 bool Src0IsKill = MUL->getOperand(1).isKill();
3587 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3588 bool Src1IsKill = MUL->getOperand(2).isKill();
3589 unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
3590 bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
3591
3592 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3593 MRI.constrainRegClass(ResultReg, RC);
3594 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3595 MRI.constrainRegClass(SrcReg0, RC);
3596 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3597 MRI.constrainRegClass(SrcReg1, RC);
3598 if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
3599 MRI.constrainRegClass(SrcReg2, RC);
3600
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003601 MachineInstrBuilder MIB;
3602 if (kind == FMAInstKind::Default)
3603 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3604 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3605 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3606 .addReg(SrcReg2, getKillRegState(Src2IsKill));
3607 else if (kind == FMAInstKind::Indexed)
3608 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3609 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3610 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3611 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3612 .addImm(MUL->getOperand(3).getImm());
3613 else if (kind == FMAInstKind::Accumulator)
3614 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3615 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3616 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3617 .addReg(SrcReg1, getKillRegState(Src1IsKill));
3618 else
3619 assert(false && "Invalid FMA instruction kind \n");
 3620 // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003621 InsInstrs.push_back(MIB);
3622 return MUL;
3623}
3624
3625/// genMaddR - Generate madd instruction and combine mul and add using
3626/// an extra virtual register
3627/// Example - an ADD intermediate needs to be stored in a register:
3628/// MUL I=A,B,0
3629/// ADD R,I,Imm
3630/// ==> ORR V, ZR, Imm
3631/// ==> MADD R,A,B,V
3632/// \param Root is the ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00003633/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003634/// contain the generated madd instruction
 3635/// \param IdxMulOpd is the index of the operand in Root that is the result of
3636/// the MUL. In the example above IdxMulOpd is 1.
 3637/// \param MaddOpc the opcode of the madd instruction
3638/// \param VR is a virtual register that holds the value of an ADD operand
3639/// (V in the example above).
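/// \param RC the register class used to constrain the result, source, and
/// \p VR registers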
3640static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
3641 const TargetInstrInfo *TII, MachineInstr &Root,
3642 SmallVectorImpl<MachineInstr *> &InsInstrs,
3643 unsigned IdxMulOpd, unsigned MaddOpc,
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003644 unsigned VR, const TargetRegisterClass *RC) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003645 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3646
3647 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003648 unsigned ResultReg = Root.getOperand(0).getReg();
3649 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3650 bool Src0IsKill = MUL->getOperand(1).isKill();
3651 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3652 bool Src1IsKill = MUL->getOperand(2).isKill();
3653
3654 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3655 MRI.constrainRegClass(ResultReg, RC);
3656 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3657 MRI.constrainRegClass(SrcReg0, RC);
3658 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3659 MRI.constrainRegClass(SrcReg1, RC);
3660 if (TargetRegisterInfo::isVirtualRegister(VR))
3661 MRI.constrainRegClass(VR, RC);
3662
3663 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
3664 ResultReg)
3665 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3666 .addReg(SrcReg1, getKillRegState(Src1IsKill))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003667 .addReg(VR);
3668 // Insert the MADD
3669 InsInstrs.push_back(MIB);
3670 return MUL;
3671}
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003672
Sanjay Patelcfe03932015-06-19 23:21:42 +00003673/// When getMachineCombinerPatterns() finds potential patterns,
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003674/// this function generates the instructions that could replace the
3675/// original code sequence
3676void AArch64InstrInfo::genAlternativeCodeSequence(
Sanjay Patel387e66e2015-11-05 19:34:57 +00003677 MachineInstr &Root, MachineCombinerPattern Pattern,
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003678 SmallVectorImpl<MachineInstr *> &InsInstrs,
3679 SmallVectorImpl<MachineInstr *> &DelInstrs,
3680 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
3681 MachineBasicBlock &MBB = *Root.getParent();
3682 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3683 MachineFunction &MF = *MBB.getParent();
Eric Christophere0818912014-09-03 20:36:26 +00003684 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003685
3686 MachineInstr *MUL;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003687 const TargetRegisterClass *RC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003688 unsigned Opc;
3689 switch (Pattern) {
3690 default:
Haicheng Wu08b94622016-01-07 04:01:02 +00003691 // Reassociate instructions.
3692 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
3693 DelInstrs, InstrIdxForVirtReg);
3694 return;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003695 case MachineCombinerPattern::MULADDW_OP1:
3696 case MachineCombinerPattern::MULADDX_OP1:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003697 // MUL I=A,B,0
3698 // ADD R,I,C
3699 // ==> MADD R,A,B,C
3700 // --- Create(MADD);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003701 if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003702 Opc = AArch64::MADDWrrr;
3703 RC = &AArch64::GPR32RegClass;
3704 } else {
3705 Opc = AArch64::MADDXrrr;
3706 RC = &AArch64::GPR64RegClass;
3707 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003708 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003709 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003710 case MachineCombinerPattern::MULADDW_OP2:
3711 case MachineCombinerPattern::MULADDX_OP2:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003712 // MUL I=A,B,0
3713 // ADD R,C,I
3714 // ==> MADD R,A,B,C
3715 // --- Create(MADD);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003716 if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003717 Opc = AArch64::MADDWrrr;
3718 RC = &AArch64::GPR32RegClass;
3719 } else {
3720 Opc = AArch64::MADDXrrr;
3721 RC = &AArch64::GPR64RegClass;
3722 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003723 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003724 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003725 case MachineCombinerPattern::MULADDWI_OP1:
3726 case MachineCombinerPattern::MULADDXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003727 // MUL I=A,B,0
3728 // ADD R,I,Imm
3729 // ==> ORR V, ZR, Imm
3730 // ==> MADD R,A,B,V
3731 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003732 const TargetRegisterClass *OrrRC;
3733 unsigned BitSize, OrrOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003734 if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003735 OrrOpc = AArch64::ORRWri;
3736 OrrRC = &AArch64::GPR32spRegClass;
3737 BitSize = 32;
3738 ZeroReg = AArch64::WZR;
3739 Opc = AArch64::MADDWrrr;
3740 RC = &AArch64::GPR32RegClass;
3741 } else {
3742 OrrOpc = AArch64::ORRXri;
3743 OrrRC = &AArch64::GPR64spRegClass;
3744 BitSize = 64;
3745 ZeroReg = AArch64::XZR;
3746 Opc = AArch64::MADDXrrr;
3747 RC = &AArch64::GPR64RegClass;
3748 }
3749 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
3750 uint64_t Imm = Root.getOperand(2).getImm();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003751
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003752 if (Root.getOperand(3).isImm()) {
3753 unsigned Val = Root.getOperand(3).getImm();
3754 Imm = Imm << Val;
3755 }
David Majnemer1182dd82016-07-21 23:46:56 +00003756 uint64_t UImm = SignExtend64(Imm, BitSize);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003757 uint64_t Encoding;
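    // The combine only fires if Imm (after any shift) is representable as a
    // logical immediate, so that a single ORR from the zero register can
    // materialize it for the MADD.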
3758 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3759 MachineInstrBuilder MIB1 =
3760 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
3761 .addReg(ZeroReg)
3762 .addImm(Encoding);
3763 InsInstrs.push_back(MIB1);
3764 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3765 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003766 }
3767 break;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003768 }
Sanjay Patel387e66e2015-11-05 19:34:57 +00003769 case MachineCombinerPattern::MULSUBW_OP1:
3770 case MachineCombinerPattern::MULSUBX_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003771 // MUL I=A,B,0
3772 // SUB R,I, C
3773 // ==> SUB V, 0, C
3774 // ==> MADD R,A,B,V // = -C + A*B
3775 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003776 const TargetRegisterClass *SubRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003777 unsigned SubOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003778 if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003779 SubOpc = AArch64::SUBWrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003780 SubRC = &AArch64::GPR32spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003781 ZeroReg = AArch64::WZR;
3782 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003783 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003784 } else {
3785 SubOpc = AArch64::SUBXrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003786 SubRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003787 ZeroReg = AArch64::XZR;
3788 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003789 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003790 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003791 unsigned NewVR = MRI.createVirtualRegister(SubRC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003792 // SUB NewVR, 0, C
3793 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003794 BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003795 .addReg(ZeroReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003796 .add(Root.getOperand(2));
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003797 InsInstrs.push_back(MIB1);
3798 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003799 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3800 break;
3801 }
Sanjay Patel387e66e2015-11-05 19:34:57 +00003802 case MachineCombinerPattern::MULSUBW_OP2:
3803 case MachineCombinerPattern::MULSUBX_OP2:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003804 // MUL I=A,B,0
3805 // SUB R,C,I
3806 // ==> MSUB R,A,B,C (computes C - A*B)
3807 // --- Create(MSUB);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003808 if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003809 Opc = AArch64::MSUBWrrr;
3810 RC = &AArch64::GPR32RegClass;
3811 } else {
3812 Opc = AArch64::MSUBXrrr;
3813 RC = &AArch64::GPR64RegClass;
3814 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003815 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003816 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003817 case MachineCombinerPattern::MULSUBWI_OP1:
3818 case MachineCombinerPattern::MULSUBXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003819 // MUL I=A,B,0
3820 // SUB R,I, Imm
3821 // ==> ORR V, ZR, -Imm
3822 // ==> MADD R,A,B,V // = -Imm + A*B
3823 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003824 const TargetRegisterClass *OrrRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003825 unsigned BitSize, OrrOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003826 if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
Juergen Ributzka25816b02014-08-30 06:16:26 +00003827 OrrOpc = AArch64::ORRWri;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003828 OrrRC = &AArch64::GPR32spRegClass;
3829 BitSize = 32;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003830 ZeroReg = AArch64::WZR;
3831 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003832 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003833 } else {
3834 OrrOpc = AArch64::ORRXri;
Juergen Ributzkaf9660f02014-11-04 22:20:07 +00003835 OrrRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003836 BitSize = 64;
3837 ZeroReg = AArch64::XZR;
3838 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003839 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003840 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003841 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
David Majnemer1182dd82016-07-21 23:46:56 +00003842 uint64_t Imm = Root.getOperand(2).getImm();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003843 if (Root.getOperand(3).isImm()) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003844 unsigned Val = Root.getOperand(3).getImm();
3845 Imm = Imm << Val;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003846 }
David Majnemer1182dd82016-07-21 23:46:56 +00003847 uint64_t UImm = SignExtend64(-Imm, BitSize);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003848 uint64_t Encoding;
3849 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3850 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003851 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003852 .addReg(ZeroReg)
3853 .addImm(Encoding);
3854 InsInstrs.push_back(MIB1);
3855 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003856 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003857 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003858 break;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003859 }
Gerolf Hoflehner01b3a6182016-04-24 05:14:01 +00003860 // Floating Point Support
3861 case MachineCombinerPattern::FMULADDS_OP1:
3862 case MachineCombinerPattern::FMULADDD_OP1:
 3863 // FMUL I=A,B,0
 3864 // FADD R,I,C
 3865 // ==> FMADD R,A,B,C
 3866 // --- Create(FMADD);
3867 if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
3868 Opc = AArch64::FMADDSrrr;
3869 RC = &AArch64::FPR32RegClass;
3870 } else {
3871 Opc = AArch64::FMADDDrrr;
3872 RC = &AArch64::FPR64RegClass;
3873 }
3874 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
3875 break;
3876 case MachineCombinerPattern::FMULADDS_OP2:
3877 case MachineCombinerPattern::FMULADDD_OP2:
3878 // FMUL I=A,B,0
3879 // FADD R,C,I
3880 // ==> FMADD R,A,B,C
3881 // --- Create(FMADD);
3882 if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
3883 Opc = AArch64::FMADDSrrr;
3884 RC = &AArch64::FPR32RegClass;
3885 } else {
3886 Opc = AArch64::FMADDDrrr;
3887 RC = &AArch64::FPR64RegClass;
3888 }
3889 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3890 break;

  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f32_OP1:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f64_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBD_OP1: {
    // FMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMSUB R,A,B,C // = -C + A*B
    // --- Create(FNMSUB);
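    // For instance (illustrative registers):
    //   fmul d1, d2, d3
    //   fsub d0, d1, d4
    // becomes
    //   fnmsub d0, d2, d3, d4   // d0 = d2 * d3 - d4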
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
      Opc = AArch64::FNMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }
  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULSUBD_OP2: {
    // FMUL I=A,B,0
    // FSUB R,C,I
    // ==> FMSUB R,A,B,C (computes C - A*B)
    // --- Create(FMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
      Opc = AArch64::FMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
    Opc = AArch64::FMLSv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
    Opc = AArch64::FMLSv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
      Opc = AArch64::FMLSv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
      Opc = AArch64::FMLSv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv4f32_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
      Opc = AArch64::FMLSv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion.
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}

/// \brief Replace a csinc-branch sequence by a simple conditional branch.
///
/// Examples:
/// 1.
///   csinc w9, wzr, wzr, <condition code>
///   tbnz w9, #0, 0x44
/// to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz w9, #0, 0x44
/// to
///   b.<condition code>
///
/// Also replace a compare-and-branch sequence by a TBZ/TBNZ instruction when
/// the compare's constant operand is a power of 2.
///
/// Examples:
///   and  w8, w8, #0x400
///   cbnz w8, L1
/// to
///   tbnz w8, #10, L1
///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI.getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI.getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI.getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look through COPY instructions to find the definition.
  while (DefMI->isCopy()) {
    unsigned CopyVReg = DefMI->getOperand(1).getReg();
    if (!MRI->hasOneNonDBGUse(CopyVReg))
      return false;
    if (!MRI->hasOneDef(CopyVReg))
      return false;
    DefMI = MRI->getVRegDef(CopyVReg);
  }

  switch (DefMI->getOpcode()) {
  default:
    return false;
  // Fold AND into a TBZ/TBNZ if the constant operand is a power of 2.
  case AArch64::ANDWri:
  case AArch64::ANDXri: {
    if (IsTestAndBranch)
      return false;
    if (DefMI->getParent() != MBB)
      return false;
    if (!MRI->hasOneNonDBGUse(VReg))
      return false;

    bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
    uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
        DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
    if (!isPowerOf2_64(Mask))
      return false;

    MachineOperand &MO = DefMI->getOperand(1);
    unsigned NewReg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(NewReg))
      return false;

    assert(!MRI->def_empty(NewReg) && "Register must be defined.");

    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    unsigned Imm = Log2_64(Mask);
    unsigned Opc = (Imm < 32)
                       ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
                       : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
    MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
                              .addReg(NewReg)
                              .addImm(Imm)
                              .addMBB(TBB);
    // The register now lives on to the TBZ/TBNZ.
    MO.setIsKill(false);

    // For bit numbers smaller than 32, we must use the 32-bit (W) variant in
    // all cases, because the 64-bit (X) variant cannot encode them.
    // Therefore, if the input register is 64-bit, take its 32-bit
    // sub-register.
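    // For instance (illustrative): testing bit 3 of a 64-bit register yields
    // "TBNZ wN, #3" on the register's sub_32 sub-register rather than an
    // unencodable "TBNZ xN, #3".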
    if (!Is32Bit && Imm < 32)
      NewMI->getOperand(0).setSubReg(AArch64::sub_32);
    MI.eraseFromParent();
    return true;
  }
  // Look for CSINC.
  case AArch64::CSINCWr:
  case AArch64::CSINCXr: {
    if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
          DefMI->getOperand(2).getReg() == AArch64::WZR) &&
        !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
          DefMI->getOperand(2).getReg() == AArch64::XZR))
      return false;

    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
      return false;

    AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
    // Convert only when the condition code is not modified between
    // the CSINC and the branch. The CC may be used by other
    // instructions in between.
    if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
      return false;
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
    DebugLoc DL = MI.getDebugLoc();
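    // CSINC Wd, WZR, WZR, cc sets Wd to 0 when cc holds and to 1 otherwise,
    // so a TBNZ/CBNZ on the result fires exactly when cc is false; invert
    // the condition code for those "negative" branches.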
    if (IsNegativeBranch)
      CC = AArch64CC::getInvertedCondCode(CC);
    BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }
  }
}

std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
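  // For instance (illustrative): a flag word of MO_PAGEOFF | MO_NC splits
  // into the direct fragment MO_PAGEOFF and the bitmask remainder MO_NC.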
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"}};
  return makeArrayRef(TargetFlags);
}