blob: 43db52fd082a874b41c95fa1c6c07ecc16bea424 [file] [log] [blame]
Tim Northover3b0846e2014-05-24 12:50:23 +00001//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the AArch64 implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64InstrInfo.h"
15#include "AArch64Subtarget.h"
16#include "MCTargetDesc/AArch64AddressingModes.h"
17#include "llvm/CodeGen/MachineFrameInfo.h"
18#include "llvm/CodeGen/MachineInstrBuilder.h"
19#include "llvm/CodeGen/MachineMemOperand.h"
20#include "llvm/CodeGen/MachineRegisterInfo.h"
21#include "llvm/CodeGen/PseudoSourceValue.h"
22#include "llvm/MC/MCInst.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/TargetRegistry.h"
25
26using namespace llvm;
27
28#define GET_INSTRINFO_CTOR_DTOR
29#include "AArch64GenInstrInfo.inc"
30
31AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
32 : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
Eric Christophera0de2532015-03-18 20:37:30 +000033 RI(STI.getTargetTriple()), Subtarget(STI) {}
Tim Northover3b0846e2014-05-24 12:50:23 +000034
35/// GetInstSize - Return the number of bytes of code the specified
36/// instruction may be. This returns the maximum number of bytes.
37unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
Tim Northoverd5531f72014-06-17 11:31:42 +000038 const MachineBasicBlock &MBB = *MI->getParent();
39 const MachineFunction *MF = MBB.getParent();
40 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +000041
Tim Northoverd5531f72014-06-17 11:31:42 +000042 if (MI->getOpcode() == AArch64::INLINEASM)
43 return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
44
45 const MCInstrDesc &Desc = MI->getDesc();
Tim Northover3b0846e2014-05-24 12:50:23 +000046 switch (Desc.getOpcode()) {
47 default:
48 // Anything not explicitly designated otherwise is a nomal 4-byte insn.
49 return 4;
50 case TargetOpcode::DBG_VALUE:
51 case TargetOpcode::EH_LABEL:
52 case TargetOpcode::IMPLICIT_DEF:
53 case TargetOpcode::KILL:
54 return 0;
55 }
56
57 llvm_unreachable("GetInstSizeInBytes()- Unable to determin insn size");
58}
59
60static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
61 SmallVectorImpl<MachineOperand> &Cond) {
62 // Block ends with fall-through condbranch.
63 switch (LastInst->getOpcode()) {
64 default:
65 llvm_unreachable("Unknown branch instruction?");
66 case AArch64::Bcc:
67 Target = LastInst->getOperand(1).getMBB();
68 Cond.push_back(LastInst->getOperand(0));
69 break;
70 case AArch64::CBZW:
71 case AArch64::CBZX:
72 case AArch64::CBNZW:
73 case AArch64::CBNZX:
74 Target = LastInst->getOperand(1).getMBB();
75 Cond.push_back(MachineOperand::CreateImm(-1));
76 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
77 Cond.push_back(LastInst->getOperand(0));
78 break;
79 case AArch64::TBZW:
80 case AArch64::TBZX:
81 case AArch64::TBNZW:
82 case AArch64::TBNZX:
83 Target = LastInst->getOperand(2).getMBB();
84 Cond.push_back(MachineOperand::CreateImm(-1));
85 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
86 Cond.push_back(LastInst->getOperand(0));
87 Cond.push_back(LastInst->getOperand(1));
88 }
89}
90
91// Branch analysis.
92bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
93 MachineBasicBlock *&TBB,
94 MachineBasicBlock *&FBB,
95 SmallVectorImpl<MachineOperand> &Cond,
96 bool AllowModify) const {
97 // If the block has no terminators, it just falls into the block after it.
Benjamin Kramere61cbd12015-06-25 13:28:24 +000098 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
99 if (I == MBB.end())
Tim Northover3b0846e2014-05-24 12:50:23 +0000100 return false;
Benjamin Kramere61cbd12015-06-25 13:28:24 +0000101
Duncan P. N. Exon Smith6307eb52016-02-23 02:46:52 +0000102 if (!isUnpredicatedTerminator(*I))
Tim Northover3b0846e2014-05-24 12:50:23 +0000103 return false;
104
105 // Get the last instruction in the block.
106 MachineInstr *LastInst = I;
107
108 // If there is only one terminator instruction, process it.
109 unsigned LastOpc = LastInst->getOpcode();
Duncan P. N. Exon Smith6307eb52016-02-23 02:46:52 +0000110 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
Tim Northover3b0846e2014-05-24 12:50:23 +0000111 if (isUncondBranchOpcode(LastOpc)) {
112 TBB = LastInst->getOperand(0).getMBB();
113 return false;
114 }
115 if (isCondBranchOpcode(LastOpc)) {
116 // Block ends with fall-through condbranch.
117 parseCondBranch(LastInst, TBB, Cond);
118 return false;
119 }
120 return true; // Can't handle indirect branch.
121 }
122
123 // Get the instruction before it if it is a terminator.
124 MachineInstr *SecondLastInst = I;
125 unsigned SecondLastOpc = SecondLastInst->getOpcode();
126
127 // If AllowModify is true and the block ends with two or more unconditional
128 // branches, delete all but the first unconditional branch.
129 if (AllowModify && isUncondBranchOpcode(LastOpc)) {
130 while (isUncondBranchOpcode(SecondLastOpc)) {
131 LastInst->eraseFromParent();
132 LastInst = SecondLastInst;
133 LastOpc = LastInst->getOpcode();
Duncan P. N. Exon Smith6307eb52016-02-23 02:46:52 +0000134 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
Tim Northover3b0846e2014-05-24 12:50:23 +0000135 // Return now the only terminator is an unconditional branch.
136 TBB = LastInst->getOperand(0).getMBB();
137 return false;
138 } else {
139 SecondLastInst = I;
140 SecondLastOpc = SecondLastInst->getOpcode();
141 }
142 }
143 }
144
145 // If there are three terminators, we don't know what sort of block this is.
Duncan P. N. Exon Smith6307eb52016-02-23 02:46:52 +0000146 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
Tim Northover3b0846e2014-05-24 12:50:23 +0000147 return true;
148
149 // If the block ends with a B and a Bcc, handle it.
150 if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
151 parseCondBranch(SecondLastInst, TBB, Cond);
152 FBB = LastInst->getOperand(0).getMBB();
153 return false;
154 }
155
156 // If the block ends with two unconditional branches, handle it. The second
157 // one is not executed, so remove it.
158 if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
159 TBB = SecondLastInst->getOperand(0).getMBB();
160 I = LastInst;
161 if (AllowModify)
162 I->eraseFromParent();
163 return false;
164 }
165
166 // ...likewise if it ends with an indirect branch followed by an unconditional
167 // branch.
168 if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
169 I = LastInst;
170 if (AllowModify)
171 I->eraseFromParent();
172 return true;
173 }
174
175 // Otherwise, can't handle this.
176 return true;
177}
178
179bool AArch64InstrInfo::ReverseBranchCondition(
180 SmallVectorImpl<MachineOperand> &Cond) const {
181 if (Cond[0].getImm() != -1) {
182 // Regular Bcc
183 AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
184 Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
185 } else {
186 // Folded compare-and-branch
187 switch (Cond[1].getImm()) {
188 default:
189 llvm_unreachable("Unknown conditional branch!");
190 case AArch64::CBZW:
191 Cond[1].setImm(AArch64::CBNZW);
192 break;
193 case AArch64::CBNZW:
194 Cond[1].setImm(AArch64::CBZW);
195 break;
196 case AArch64::CBZX:
197 Cond[1].setImm(AArch64::CBNZX);
198 break;
199 case AArch64::CBNZX:
200 Cond[1].setImm(AArch64::CBZX);
201 break;
202 case AArch64::TBZW:
203 Cond[1].setImm(AArch64::TBNZW);
204 break;
205 case AArch64::TBNZW:
206 Cond[1].setImm(AArch64::TBZW);
207 break;
208 case AArch64::TBZX:
209 Cond[1].setImm(AArch64::TBNZX);
210 break;
211 case AArch64::TBNZX:
212 Cond[1].setImm(AArch64::TBZX);
213 break;
214 }
215 }
216
217 return false;
218}
219
220unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
Benjamin Kramere61cbd12015-06-25 13:28:24 +0000221 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
222 if (I == MBB.end())
Tim Northover3b0846e2014-05-24 12:50:23 +0000223 return 0;
Benjamin Kramere61cbd12015-06-25 13:28:24 +0000224
Tim Northover3b0846e2014-05-24 12:50:23 +0000225 if (!isUncondBranchOpcode(I->getOpcode()) &&
226 !isCondBranchOpcode(I->getOpcode()))
227 return 0;
228
229 // Remove the branch.
230 I->eraseFromParent();
231
232 I = MBB.end();
233
234 if (I == MBB.begin())
235 return 1;
236 --I;
237 if (!isCondBranchOpcode(I->getOpcode()))
238 return 1;
239
240 // Remove the branch.
241 I->eraseFromParent();
242 return 2;
243}
244
245void AArch64InstrInfo::instantiateCondBranch(
246 MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
Ahmed Bougachac88bf542015-06-11 19:30:37 +0000247 ArrayRef<MachineOperand> Cond) const {
Tim Northover3b0846e2014-05-24 12:50:23 +0000248 if (Cond[0].getImm() != -1) {
249 // Regular Bcc
250 BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
251 } else {
252 // Folded compare-and-branch
Ahmed Bougacha72001cf2014-11-07 02:50:00 +0000253 // Note that we use addOperand instead of addReg to keep the flags.
Tim Northover3b0846e2014-05-24 12:50:23 +0000254 const MachineInstrBuilder MIB =
Ahmed Bougacha72001cf2014-11-07 02:50:00 +0000255 BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
Tim Northover3b0846e2014-05-24 12:50:23 +0000256 if (Cond.size() > 3)
257 MIB.addImm(Cond[3].getImm());
258 MIB.addMBB(TBB);
259 }
260}
261
262unsigned AArch64InstrInfo::InsertBranch(
263 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
Ahmed Bougachac88bf542015-06-11 19:30:37 +0000264 ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
Tim Northover3b0846e2014-05-24 12:50:23 +0000265 // Shouldn't be a fall through.
266 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
267
268 if (!FBB) {
269 if (Cond.empty()) // Unconditional branch?
270 BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
271 else
272 instantiateCondBranch(MBB, DL, TBB, Cond);
273 return 1;
274 }
275
276 // Two-way conditional branch.
277 instantiateCondBranch(MBB, DL, TBB, Cond);
278 BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
279 return 2;
280}
281
282// Find the original register that VReg is copied from.
283static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
284 while (TargetRegisterInfo::isVirtualRegister(VReg)) {
285 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
286 if (!DefMI->isFullCopy())
287 return VReg;
288 VReg = DefMI->getOperand(1).getReg();
289 }
290 return VReg;
291}
292
293// Determine if VReg is defined by an instruction that can be folded into a
294// csel instruction. If so, return the folded opcode, and the replacement
295// register.
296static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
297 unsigned *NewVReg = nullptr) {
298 VReg = removeCopies(MRI, VReg);
299 if (!TargetRegisterInfo::isVirtualRegister(VReg))
300 return 0;
301
302 bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
303 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
304 unsigned Opc = 0;
305 unsigned SrcOpNum = 0;
306 switch (DefMI->getOpcode()) {
307 case AArch64::ADDSXri:
308 case AArch64::ADDSWri:
309 // if NZCV is used, do not fold.
310 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
311 return 0;
312 // fall-through to ADDXri and ADDWri.
313 case AArch64::ADDXri:
314 case AArch64::ADDWri:
315 // add x, 1 -> csinc.
316 if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
317 DefMI->getOperand(3).getImm() != 0)
318 return 0;
319 SrcOpNum = 1;
320 Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
321 break;
322
323 case AArch64::ORNXrr:
324 case AArch64::ORNWrr: {
325 // not x -> csinv, represented as orn dst, xzr, src.
326 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
327 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
328 return 0;
329 SrcOpNum = 2;
330 Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
331 break;
332 }
333
334 case AArch64::SUBSXrr:
335 case AArch64::SUBSWrr:
336 // if NZCV is used, do not fold.
337 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
338 return 0;
339 // fall-through to SUBXrr and SUBWrr.
340 case AArch64::SUBXrr:
341 case AArch64::SUBWrr: {
342 // neg x -> csneg, represented as sub dst, xzr, src.
343 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
344 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
345 return 0;
346 SrcOpNum = 2;
347 Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
348 break;
349 }
350 default:
351 return 0;
352 }
353 assert(Opc && SrcOpNum && "Missing parameters");
354
355 if (NewVReg)
356 *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
357 return Opc;
358}
359
360bool AArch64InstrInfo::canInsertSelect(
Ahmed Bougachac88bf542015-06-11 19:30:37 +0000361 const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
Tim Northover3b0846e2014-05-24 12:50:23 +0000362 unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
363 int &FalseCycles) const {
364 // Check register classes.
365 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
366 const TargetRegisterClass *RC =
Eric Christophera0de2532015-03-18 20:37:30 +0000367 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
Tim Northover3b0846e2014-05-24 12:50:23 +0000368 if (!RC)
369 return false;
370
371 // Expanding cbz/tbz requires an extra cycle of latency on the condition.
372 unsigned ExtraCondLat = Cond.size() != 1;
373
374 // GPRs are handled by csel.
375 // FIXME: Fold in x+1, -x, and ~x when applicable.
376 if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
377 AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
378 // Single-cycle csel, csinc, csinv, and csneg.
379 CondCycles = 1 + ExtraCondLat;
380 TrueCycles = FalseCycles = 1;
381 if (canFoldIntoCSel(MRI, TrueReg))
382 TrueCycles = 0;
383 else if (canFoldIntoCSel(MRI, FalseReg))
384 FalseCycles = 0;
385 return true;
386 }
387
388 // Scalar floating point is handled by fcsel.
389 // FIXME: Form fabs, fmin, and fmax when applicable.
390 if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
391 AArch64::FPR32RegClass.hasSubClassEq(RC)) {
392 CondCycles = 5 + ExtraCondLat;
393 TrueCycles = FalseCycles = 2;
394 return true;
395 }
396
397 // Can't do vectors.
398 return false;
399}
400
401void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
402 MachineBasicBlock::iterator I, DebugLoc DL,
403 unsigned DstReg,
Ahmed Bougachac88bf542015-06-11 19:30:37 +0000404 ArrayRef<MachineOperand> Cond,
Tim Northover3b0846e2014-05-24 12:50:23 +0000405 unsigned TrueReg, unsigned FalseReg) const {
406 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
407
408 // Parse the condition code, see parseCondBranch() above.
409 AArch64CC::CondCode CC;
410 switch (Cond.size()) {
411 default:
412 llvm_unreachable("Unknown condition opcode in Cond");
413 case 1: // b.cc
414 CC = AArch64CC::CondCode(Cond[0].getImm());
415 break;
416 case 3: { // cbz/cbnz
417 // We must insert a compare against 0.
418 bool Is64Bit;
419 switch (Cond[1].getImm()) {
420 default:
421 llvm_unreachable("Unknown branch opcode in Cond");
422 case AArch64::CBZW:
423 Is64Bit = 0;
424 CC = AArch64CC::EQ;
425 break;
426 case AArch64::CBZX:
427 Is64Bit = 1;
428 CC = AArch64CC::EQ;
429 break;
430 case AArch64::CBNZW:
431 Is64Bit = 0;
432 CC = AArch64CC::NE;
433 break;
434 case AArch64::CBNZX:
435 Is64Bit = 1;
436 CC = AArch64CC::NE;
437 break;
438 }
439 unsigned SrcReg = Cond[2].getReg();
440 if (Is64Bit) {
441 // cmp reg, #0 is actually subs xzr, reg, #0.
442 MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
443 BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
444 .addReg(SrcReg)
445 .addImm(0)
446 .addImm(0);
447 } else {
448 MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
449 BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
450 .addReg(SrcReg)
451 .addImm(0)
452 .addImm(0);
453 }
454 break;
455 }
456 case 4: { // tbz/tbnz
457 // We must insert a tst instruction.
458 switch (Cond[1].getImm()) {
459 default:
460 llvm_unreachable("Unknown branch opcode in Cond");
461 case AArch64::TBZW:
462 case AArch64::TBZX:
463 CC = AArch64CC::EQ;
464 break;
465 case AArch64::TBNZW:
466 case AArch64::TBNZX:
467 CC = AArch64CC::NE;
468 break;
469 }
470 // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
471 if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
472 BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
473 .addReg(Cond[2].getReg())
474 .addImm(
475 AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
476 else
477 BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
478 .addReg(Cond[2].getReg())
479 .addImm(
480 AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
481 break;
482 }
483 }
484
485 unsigned Opc = 0;
486 const TargetRegisterClass *RC = nullptr;
487 bool TryFold = false;
488 if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
489 RC = &AArch64::GPR64RegClass;
490 Opc = AArch64::CSELXr;
491 TryFold = true;
492 } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
493 RC = &AArch64::GPR32RegClass;
494 Opc = AArch64::CSELWr;
495 TryFold = true;
496 } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
497 RC = &AArch64::FPR64RegClass;
498 Opc = AArch64::FCSELDrrr;
499 } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
500 RC = &AArch64::FPR32RegClass;
501 Opc = AArch64::FCSELSrrr;
502 }
503 assert(RC && "Unsupported regclass");
504
505 // Try folding simple instructions into the csel.
506 if (TryFold) {
507 unsigned NewVReg = 0;
508 unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
509 if (FoldedOpc) {
510 // The folded opcodes csinc, csinc and csneg apply the operation to
511 // FalseReg, so we need to invert the condition.
512 CC = AArch64CC::getInvertedCondCode(CC);
513 TrueReg = FalseReg;
514 } else
515 FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);
516
517 // Fold the operation. Leave any dead instructions for DCE to clean up.
518 if (FoldedOpc) {
519 FalseReg = NewVReg;
520 Opc = FoldedOpc;
521 // The extends the live range of NewVReg.
522 MRI.clearKillFlags(NewVReg);
523 }
524 }
525
526 // Pull all virtual register into the appropriate class.
527 MRI.constrainRegClass(TrueReg, RC);
528 MRI.constrainRegClass(FalseReg, RC);
529
530 // Insert the csel.
531 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
532 CC);
533}
534
Lawrence Hu687097a2015-07-23 23:55:28 +0000535/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
Weiming Zhaob33a5552015-07-23 19:24:53 +0000536static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
537 uint64_t Imm = MI->getOperand(1).getImm();
538 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
539 uint64_t Encoding;
540 return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
541}
542
Jiangning Liucd296372014-07-29 02:09:26 +0000543// FIXME: this implementation should be micro-architecture dependent, so a
544// micro-architecture target hook should be introduced here in future.
545bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
Chad Rosiercd2be7f2016-02-12 15:51:51 +0000546 if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53() &&
547 !Subtarget.isKryo())
Jiangning Liucd296372014-07-29 02:09:26 +0000548 return MI->isAsCheapAsAMove();
549
550 switch (MI->getOpcode()) {
551 default:
552 return false;
553
554 // add/sub on register without shift
555 case AArch64::ADDWri:
556 case AArch64::ADDXri:
557 case AArch64::SUBWri:
558 case AArch64::SUBXri:
559 return (MI->getOperand(3).getImm() == 0);
560
561 // logical ops on immediate
562 case AArch64::ANDWri:
563 case AArch64::ANDXri:
564 case AArch64::EORWri:
565 case AArch64::EORXri:
566 case AArch64::ORRWri:
567 case AArch64::ORRXri:
568 return true;
569
570 // logical ops on register without shift
571 case AArch64::ANDWrr:
572 case AArch64::ANDXrr:
573 case AArch64::BICWrr:
574 case AArch64::BICXrr:
575 case AArch64::EONWrr:
576 case AArch64::EONXrr:
577 case AArch64::EORWrr:
578 case AArch64::EORXrr:
579 case AArch64::ORNWrr:
580 case AArch64::ORNXrr:
581 case AArch64::ORRWrr:
582 case AArch64::ORRXrr:
583 return true;
Weiming Zhaob33a5552015-07-23 19:24:53 +0000584 // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
585 // ORRXri, it is as cheap as MOV
586 case AArch64::MOVi32imm:
587 return canBeExpandedToORR(MI, 32);
588 case AArch64::MOVi64imm:
589 return canBeExpandedToORR(MI, 64);
Jiangning Liucd296372014-07-29 02:09:26 +0000590 }
591
592 llvm_unreachable("Unknown opcode to check as cheap as a move!");
593}
594
Tim Northover3b0846e2014-05-24 12:50:23 +0000595bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
596 unsigned &SrcReg, unsigned &DstReg,
597 unsigned &SubIdx) const {
598 switch (MI.getOpcode()) {
599 default:
600 return false;
601 case AArch64::SBFMXri: // aka sxtw
602 case AArch64::UBFMXri: // aka uxtw
603 // Check for the 32 -> 64 bit extension case, these instructions can do
604 // much more.
605 if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
606 return false;
607 // This is a signed or unsigned 32 -> 64 bit extension.
608 SrcReg = MI.getOperand(1).getReg();
609 DstReg = MI.getOperand(0).getReg();
610 SubIdx = AArch64::sub_32;
611 return true;
612 }
613}
614
Chad Rosier3528c1e2014-09-08 14:43:48 +0000615bool
616AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
617 MachineInstr *MIb,
618 AliasAnalysis *AA) const {
Eric Christophera0de2532015-03-18 20:37:30 +0000619 const TargetRegisterInfo *TRI = &getRegisterInfo();
Chad Rosier3528c1e2014-09-08 14:43:48 +0000620 unsigned BaseRegA = 0, BaseRegB = 0;
Chad Rosier0da267d2016-03-09 16:46:48 +0000621 int64_t OffsetA = 0, OffsetB = 0;
622 unsigned WidthA = 0, WidthB = 0;
Chad Rosier3528c1e2014-09-08 14:43:48 +0000623
Chad Rosiera73b3592015-05-21 21:59:57 +0000624 assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
625 assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");
Chad Rosier3528c1e2014-09-08 14:43:48 +0000626
627 if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
628 MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
629 return false;
630
631 // Retrieve the base register, offset from the base register and width. Width
632 // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
633 // base registers are identical, and the offset of a lower memory access +
634 // the width doesn't overlap the offset of a higher memory access,
635 // then the memory accesses are different.
Sanjoy Dasb666ea32015-06-15 18:44:14 +0000636 if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
637 getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
Chad Rosier3528c1e2014-09-08 14:43:48 +0000638 if (BaseRegA == BaseRegB) {
639 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
640 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
641 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
642 if (LowOffset + LowWidth <= HighOffset)
643 return true;
644 }
645 }
646 return false;
647}
648
Tim Northover3b0846e2014-05-24 12:50:23 +0000649/// analyzeCompare - For a comparison instruction, return the source registers
650/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
651/// Return true if the comparison instruction can be analyzed.
652bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
653 unsigned &SrcReg2, int &CmpMask,
654 int &CmpValue) const {
655 switch (MI->getOpcode()) {
656 default:
657 break;
658 case AArch64::SUBSWrr:
659 case AArch64::SUBSWrs:
660 case AArch64::SUBSWrx:
661 case AArch64::SUBSXrr:
662 case AArch64::SUBSXrs:
663 case AArch64::SUBSXrx:
664 case AArch64::ADDSWrr:
665 case AArch64::ADDSWrs:
666 case AArch64::ADDSWrx:
667 case AArch64::ADDSXrr:
668 case AArch64::ADDSXrs:
669 case AArch64::ADDSXrx:
670 // Replace SUBSWrr with SUBWrr if NZCV is not used.
671 SrcReg = MI->getOperand(1).getReg();
672 SrcReg2 = MI->getOperand(2).getReg();
673 CmpMask = ~0;
674 CmpValue = 0;
675 return true;
676 case AArch64::SUBSWri:
677 case AArch64::ADDSWri:
678 case AArch64::SUBSXri:
679 case AArch64::ADDSXri:
680 SrcReg = MI->getOperand(1).getReg();
681 SrcReg2 = 0;
682 CmpMask = ~0;
Jiangning Liudcc651f2014-08-08 14:19:29 +0000683 // FIXME: In order to convert CmpValue to 0 or 1
684 CmpValue = (MI->getOperand(2).getImm() != 0);
Tim Northover3b0846e2014-05-24 12:50:23 +0000685 return true;
686 case AArch64::ANDSWri:
687 case AArch64::ANDSXri:
688 // ANDS does not use the same encoding scheme as the others xxxS
689 // instructions.
690 SrcReg = MI->getOperand(1).getReg();
691 SrcReg2 = 0;
692 CmpMask = ~0;
Jiangning Liudcc651f2014-08-08 14:19:29 +0000693 // FIXME:The return val type of decodeLogicalImmediate is uint64_t,
694 // while the type of CmpValue is int. When converting uint64_t to int,
695 // the high 32 bits of uint64_t will be lost.
696 // In fact it causes a bug in spec2006-483.xalancbmk
697 // CmpValue is only used to compare with zero in OptimizeCompareInstr
698 CmpValue = (AArch64_AM::decodeLogicalImmediate(
699 MI->getOperand(2).getImm(),
700 MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
Tim Northover3b0846e2014-05-24 12:50:23 +0000701 return true;
702 }
703
704 return false;
705}
706
707static bool UpdateOperandRegClass(MachineInstr *Instr) {
708 MachineBasicBlock *MBB = Instr->getParent();
709 assert(MBB && "Can't get MachineBasicBlock here");
710 MachineFunction *MF = MBB->getParent();
711 assert(MF && "Can't get MachineFunction here");
Eric Christopher6c901622015-01-28 03:51:33 +0000712 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
713 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +0000714 MachineRegisterInfo *MRI = &MF->getRegInfo();
715
716 for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
717 ++OpIdx) {
718 MachineOperand &MO = Instr->getOperand(OpIdx);
719 const TargetRegisterClass *OpRegCstraints =
720 Instr->getRegClassConstraint(OpIdx, TII, TRI);
721
722 // If there's no constraint, there's nothing to do.
723 if (!OpRegCstraints)
724 continue;
725 // If the operand is a frame index, there's nothing to do here.
726 // A frame index operand will resolve correctly during PEI.
727 if (MO.isFI())
728 continue;
729
730 assert(MO.isReg() &&
731 "Operand has register constraints without being a register!");
732
733 unsigned Reg = MO.getReg();
734 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
735 if (!OpRegCstraints->contains(Reg))
736 return false;
737 } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
738 !MRI->constrainRegClass(Reg, OpRegCstraints))
739 return false;
740 }
741
742 return true;
743}
744
Juergen Ributzka7a7c4682014-11-18 21:02:40 +0000745/// \brief Return the opcode that does not set flags when possible - otherwise
746/// return the original opcode. The caller is responsible to do the actual
747/// substitution and legality checking.
748static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
749 // Don't convert all compare instructions, because for some the zero register
750 // encoding becomes the sp register.
751 bool MIDefinesZeroReg = false;
752 if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
753 MIDefinesZeroReg = true;
754
755 switch (MI->getOpcode()) {
756 default:
757 return MI->getOpcode();
758 case AArch64::ADDSWrr:
759 return AArch64::ADDWrr;
760 case AArch64::ADDSWri:
761 return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
762 case AArch64::ADDSWrs:
763 return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
764 case AArch64::ADDSWrx:
765 return AArch64::ADDWrx;
766 case AArch64::ADDSXrr:
767 return AArch64::ADDXrr;
768 case AArch64::ADDSXri:
769 return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
770 case AArch64::ADDSXrs:
771 return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
772 case AArch64::ADDSXrx:
773 return AArch64::ADDXrx;
774 case AArch64::SUBSWrr:
775 return AArch64::SUBWrr;
776 case AArch64::SUBSWri:
777 return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
778 case AArch64::SUBSWrs:
779 return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
780 case AArch64::SUBSWrx:
781 return AArch64::SUBWrx;
782 case AArch64::SUBSXrr:
783 return AArch64::SUBXrr;
784 case AArch64::SUBSXri:
785 return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
786 case AArch64::SUBSXrs:
787 return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
788 case AArch64::SUBSXrx:
789 return AArch64::SUBXrx;
790 }
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +0000791}
Tim Northover3b0846e2014-05-24 12:50:23 +0000792
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +0000793/// True when condition code could be modified on the instruction
794/// trace starting at from and ending at to.
795static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
796 const bool CheckOnlyCCWrites,
797 const TargetRegisterInfo *TRI) {
798 // We iterate backward starting \p To until we hit \p From
799 MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();
800
801 // Early exit if To is at the beginning of the BB.
802 if (I == B)
803 return true;
804
805 // Check whether the definition of SrcReg is in the same basic block as
806 // Compare. If not, assume the condition code gets modified on some path.
807 if (To->getParent() != From->getParent())
808 return true;
809
810 // Check that NZCV isn't set on the trace.
811 for (--I; I != E; --I) {
812 const MachineInstr &Instr = *I;
813
814 if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
815 (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
816 // This instruction modifies or uses NZCV after the one we want to
817 // change.
818 return true;
819 if (I == B)
820 // We currently don't allow the instruction trace to cross basic
821 // block boundaries
822 return true;
823 }
824 return false;
825}
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +0000826/// optimizeCompareInstr - Convert the instruction supplying the argument to the
827/// comparison into one that sets the zero bit in the flags register.
828bool AArch64InstrInfo::optimizeCompareInstr(
829 MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
830 int CmpValue, const MachineRegisterInfo *MRI) const {
831
832 // Replace SUBSWrr with SUBWrr if NZCV is not used.
833 int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
834 if (Cmp_NZCV != -1) {
Juergen Ributzka7a7c4682014-11-18 21:02:40 +0000835 if (CmpInstr->definesRegister(AArch64::WZR) ||
836 CmpInstr->definesRegister(AArch64::XZR)) {
837 CmpInstr->eraseFromParent();
838 return true;
839 }
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +0000840 unsigned Opc = CmpInstr->getOpcode();
841 unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
842 if (NewOpc == Opc)
843 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +0000844 const MCInstrDesc &MCID = get(NewOpc);
845 CmpInstr->setDesc(MCID);
846 CmpInstr->RemoveOperand(Cmp_NZCV);
847 bool succeeded = UpdateOperandRegClass(CmpInstr);
848 (void)succeeded;
849 assert(succeeded && "Some operands reg class are incompatible!");
850 return true;
851 }
852
853 // Continue only if we have a "ri" where immediate is zero.
Jiangning Liudcc651f2014-08-08 14:19:29 +0000854 // FIXME:CmpValue has already been converted to 0 or 1 in analyzeCompare
855 // function.
856 assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
Tim Northover3b0846e2014-05-24 12:50:23 +0000857 if (CmpValue != 0 || SrcReg2 != 0)
858 return false;
859
860 // CmpInstr is a Compare instruction if destination register is not used.
861 if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
862 return false;
863
864 // Get the unique definition of SrcReg.
865 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
866 if (!MI)
867 return false;
868
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +0000869 bool CheckOnlyCCWrites = false;
Eric Christophera0de2532015-03-18 20:37:30 +0000870 const TargetRegisterInfo *TRI = &getRegisterInfo();
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +0000871 if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
872 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +0000873
874 unsigned NewOpc = MI->getOpcode();
875 switch (MI->getOpcode()) {
876 default:
877 return false;
878 case AArch64::ADDSWrr:
879 case AArch64::ADDSWri:
880 case AArch64::ADDSXrr:
881 case AArch64::ADDSXri:
882 case AArch64::SUBSWrr:
883 case AArch64::SUBSWri:
884 case AArch64::SUBSXrr:
885 case AArch64::SUBSXri:
886 break;
887 case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
888 case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
889 case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
890 case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
891 case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
892 case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
893 case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
894 case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
895 case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
896 case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
897 case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
898 case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
899 case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
900 case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
901 }
902
903 // Scan forward for the use of NZCV.
904 // When checking against MI: if it's a conditional code requires
905 // checking of V bit, then this is not safe to do.
906 // It is safe to remove CmpInstr if NZCV is redefined or killed.
907 // If we are done with the basic block, we need to check whether NZCV is
908 // live-out.
909 bool IsSafe = false;
910 for (MachineBasicBlock::iterator I = CmpInstr,
911 E = CmpInstr->getParent()->end();
912 !IsSafe && ++I != E;) {
913 const MachineInstr &Instr = *I;
914 for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
915 ++IO) {
916 const MachineOperand &MO = Instr.getOperand(IO);
917 if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
918 IsSafe = true;
919 break;
920 }
921 if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
922 continue;
923 if (MO.isDef()) {
924 IsSafe = true;
925 break;
926 }
927
928 // Decode the condition code.
929 unsigned Opc = Instr.getOpcode();
930 AArch64CC::CondCode CC;
931 switch (Opc) {
932 default:
933 return false;
934 case AArch64::Bcc:
935 CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
936 break;
937 case AArch64::CSINVWr:
938 case AArch64::CSINVXr:
939 case AArch64::CSINCWr:
940 case AArch64::CSINCXr:
941 case AArch64::CSELWr:
942 case AArch64::CSELXr:
943 case AArch64::CSNEGWr:
944 case AArch64::CSNEGXr:
945 case AArch64::FCSELSrrr:
946 case AArch64::FCSELDrrr:
947 CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
948 break;
949 }
950
951 // It is not safe to remove Compare instruction if Overflow(V) is used.
952 switch (CC) {
953 default:
954 // NZCV can be used multiple times, we should continue.
955 break;
956 case AArch64CC::VS:
957 case AArch64CC::VC:
958 case AArch64CC::GE:
959 case AArch64CC::LT:
960 case AArch64CC::GT:
961 case AArch64CC::LE:
962 return false;
963 }
964 }
965 }
966
967 // If NZCV is not killed nor re-defined, we should check whether it is
968 // live-out. If it is live-out, do not optimize.
969 if (!IsSafe) {
970 MachineBasicBlock *ParentBlock = CmpInstr->getParent();
971 for (auto *MBB : ParentBlock->successors())
972 if (MBB->isLiveIn(AArch64::NZCV))
973 return false;
974 }
975
976 // Update the instruction to set NZCV.
977 MI->setDesc(get(NewOpc));
978 CmpInstr->eraseFromParent();
979 bool succeeded = UpdateOperandRegClass(MI);
980 (void)succeeded;
981 assert(succeeded && "Some operands reg class are incompatible!");
982 MI->addRegisterDefined(AArch64::NZCV, TRI);
983 return true;
984}
985
Akira Hatanakae5b6e0d2014-07-25 19:31:34 +0000986bool
987AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
988 if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
989 return false;
990
991 MachineBasicBlock &MBB = *MI->getParent();
992 DebugLoc DL = MI->getDebugLoc();
993 unsigned Reg = MI->getOperand(0).getReg();
994 const GlobalValue *GV =
995 cast<GlobalValue>((*MI->memoperands_begin())->getValue());
996 const TargetMachine &TM = MBB.getParent()->getTarget();
997 unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
998 const unsigned char MO_NC = AArch64II::MO_NC;
999
1000 if ((OpFlags & AArch64II::MO_GOT) != 0) {
1001 BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
1002 .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
1003 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1004 .addReg(Reg, RegState::Kill).addImm(0)
1005 .addMemOperand(*MI->memoperands_begin());
1006 } else if (TM.getCodeModel() == CodeModel::Large) {
1007 BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1008 .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
1009 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1010 .addReg(Reg, RegState::Kill)
1011 .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
1012 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1013 .addReg(Reg, RegState::Kill)
1014 .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
1015 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1016 .addReg(Reg, RegState::Kill)
1017 .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
1018 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1019 .addReg(Reg, RegState::Kill).addImm(0)
1020 .addMemOperand(*MI->memoperands_begin());
1021 } else {
1022 BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
1023 .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
1024 unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
1025 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1026 .addReg(Reg, RegState::Kill)
1027 .addGlobalAddress(GV, 0, LoFlags)
1028 .addMemOperand(*MI->memoperands_begin());
1029 }
1030
1031 MBB.erase(MI);
1032
1033 return true;
1034}
1035
Tim Northover3b0846e2014-05-24 12:50:23 +00001036/// Return true if this is this instruction has a non-zero immediate
1037bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
1038 switch (MI->getOpcode()) {
1039 default:
1040 break;
1041 case AArch64::ADDSWrs:
1042 case AArch64::ADDSXrs:
1043 case AArch64::ADDWrs:
1044 case AArch64::ADDXrs:
1045 case AArch64::ANDSWrs:
1046 case AArch64::ANDSXrs:
1047 case AArch64::ANDWrs:
1048 case AArch64::ANDXrs:
1049 case AArch64::BICSWrs:
1050 case AArch64::BICSXrs:
1051 case AArch64::BICWrs:
1052 case AArch64::BICXrs:
1053 case AArch64::CRC32Brr:
1054 case AArch64::CRC32CBrr:
1055 case AArch64::CRC32CHrr:
1056 case AArch64::CRC32CWrr:
1057 case AArch64::CRC32CXrr:
1058 case AArch64::CRC32Hrr:
1059 case AArch64::CRC32Wrr:
1060 case AArch64::CRC32Xrr:
1061 case AArch64::EONWrs:
1062 case AArch64::EONXrs:
1063 case AArch64::EORWrs:
1064 case AArch64::EORXrs:
1065 case AArch64::ORNWrs:
1066 case AArch64::ORNXrs:
1067 case AArch64::ORRWrs:
1068 case AArch64::ORRXrs:
1069 case AArch64::SUBSWrs:
1070 case AArch64::SUBSXrs:
1071 case AArch64::SUBWrs:
1072 case AArch64::SUBXrs:
1073 if (MI->getOperand(3).isImm()) {
1074 unsigned val = MI->getOperand(3).getImm();
1075 return (val != 0);
1076 }
1077 break;
1078 }
1079 return false;
1080}
1081
1082/// Return true if this is this instruction has a non-zero immediate
1083bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
1084 switch (MI->getOpcode()) {
1085 default:
1086 break;
1087 case AArch64::ADDSWrx:
1088 case AArch64::ADDSXrx:
1089 case AArch64::ADDSXrx64:
1090 case AArch64::ADDWrx:
1091 case AArch64::ADDXrx:
1092 case AArch64::ADDXrx64:
1093 case AArch64::SUBSWrx:
1094 case AArch64::SUBSXrx:
1095 case AArch64::SUBSXrx64:
1096 case AArch64::SUBWrx:
1097 case AArch64::SUBXrx:
1098 case AArch64::SUBXrx64:
1099 if (MI->getOperand(3).isImm()) {
1100 unsigned val = MI->getOperand(3).getImm();
1101 return (val != 0);
1102 }
1103 break;
1104 }
1105
1106 return false;
1107}
1108
1109// Return true if this instruction simply sets its single destination register
1110// to zero. This is equivalent to a register rename of the zero-register.
1111bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
1112 switch (MI->getOpcode()) {
1113 default:
1114 break;
1115 case AArch64::MOVZWi:
1116 case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
1117 if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
1118 assert(MI->getDesc().getNumOperands() == 3 &&
1119 MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
1120 return true;
1121 }
1122 break;
1123 case AArch64::ANDWri: // and Rd, Rzr, #imm
1124 return MI->getOperand(1).getReg() == AArch64::WZR;
1125 case AArch64::ANDXri:
1126 return MI->getOperand(1).getReg() == AArch64::XZR;
1127 case TargetOpcode::COPY:
1128 return MI->getOperand(1).getReg() == AArch64::WZR;
1129 }
1130 return false;
1131}
1132
1133// Return true if this instruction simply renames a general register without
1134// modifying bits.
1135bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
1136 switch (MI->getOpcode()) {
1137 default:
1138 break;
1139 case TargetOpcode::COPY: {
1140 // GPR32 copies will by lowered to ORRXrs
1141 unsigned DstReg = MI->getOperand(0).getReg();
1142 return (AArch64::GPR32RegClass.contains(DstReg) ||
1143 AArch64::GPR64RegClass.contains(DstReg));
1144 }
1145 case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
1146 if (MI->getOperand(1).getReg() == AArch64::XZR) {
1147 assert(MI->getDesc().getNumOperands() == 4 &&
1148 MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
1149 return true;
1150 }
Renato Golin541d7e72014-08-01 17:27:31 +00001151 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001152 case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
1153 if (MI->getOperand(2).getImm() == 0) {
1154 assert(MI->getDesc().getNumOperands() == 4 &&
1155 MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
1156 return true;
1157 }
Renato Golin541d7e72014-08-01 17:27:31 +00001158 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001159 }
1160 return false;
1161}
1162
1163// Return true if this instruction simply renames a general register without
1164// modifying bits.
1165bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
1166 switch (MI->getOpcode()) {
1167 default:
1168 break;
1169 case TargetOpcode::COPY: {
1170 // FPR64 copies will by lowered to ORR.16b
1171 unsigned DstReg = MI->getOperand(0).getReg();
1172 return (AArch64::FPR64RegClass.contains(DstReg) ||
1173 AArch64::FPR128RegClass.contains(DstReg));
1174 }
1175 case AArch64::ORRv16i8:
1176 if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
1177 assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
1178 "invalid ORRv16i8 operands");
1179 return true;
1180 }
Renato Golin541d7e72014-08-01 17:27:31 +00001181 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00001182 }
1183 return false;
1184}
1185
1186unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
1187 int &FrameIndex) const {
1188 switch (MI->getOpcode()) {
1189 default:
1190 break;
1191 case AArch64::LDRWui:
1192 case AArch64::LDRXui:
1193 case AArch64::LDRBui:
1194 case AArch64::LDRHui:
1195 case AArch64::LDRSui:
1196 case AArch64::LDRDui:
1197 case AArch64::LDRQui:
1198 if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
1199 MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
1200 FrameIndex = MI->getOperand(1).getIndex();
1201 return MI->getOperand(0).getReg();
1202 }
1203 break;
1204 }
1205
1206 return 0;
1207}
1208
1209unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
1210 int &FrameIndex) const {
1211 switch (MI->getOpcode()) {
1212 default:
1213 break;
1214 case AArch64::STRWui:
1215 case AArch64::STRXui:
1216 case AArch64::STRBui:
1217 case AArch64::STRHui:
1218 case AArch64::STRSui:
1219 case AArch64::STRDui:
1220 case AArch64::STRQui:
1221 if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
1222 MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
1223 FrameIndex = MI->getOperand(1).getIndex();
1224 return MI->getOperand(0).getReg();
1225 }
1226 break;
1227 }
1228 return 0;
1229}
1230
1231/// Return true if this is load/store scales or extends its register offset.
1232/// This refers to scaling a dynamic index as opposed to scaled immediates.
1233/// MI should be a memory op that allows scaled addressing.
1234bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
1235 switch (MI->getOpcode()) {
1236 default:
1237 break;
1238 case AArch64::LDRBBroW:
1239 case AArch64::LDRBroW:
1240 case AArch64::LDRDroW:
1241 case AArch64::LDRHHroW:
1242 case AArch64::LDRHroW:
1243 case AArch64::LDRQroW:
1244 case AArch64::LDRSBWroW:
1245 case AArch64::LDRSBXroW:
1246 case AArch64::LDRSHWroW:
1247 case AArch64::LDRSHXroW:
1248 case AArch64::LDRSWroW:
1249 case AArch64::LDRSroW:
1250 case AArch64::LDRWroW:
1251 case AArch64::LDRXroW:
1252 case AArch64::STRBBroW:
1253 case AArch64::STRBroW:
1254 case AArch64::STRDroW:
1255 case AArch64::STRHHroW:
1256 case AArch64::STRHroW:
1257 case AArch64::STRQroW:
1258 case AArch64::STRSroW:
1259 case AArch64::STRWroW:
1260 case AArch64::STRXroW:
1261 case AArch64::LDRBBroX:
1262 case AArch64::LDRBroX:
1263 case AArch64::LDRDroX:
1264 case AArch64::LDRHHroX:
1265 case AArch64::LDRHroX:
1266 case AArch64::LDRQroX:
1267 case AArch64::LDRSBWroX:
1268 case AArch64::LDRSBXroX:
1269 case AArch64::LDRSHWroX:
1270 case AArch64::LDRSHXroX:
1271 case AArch64::LDRSWroX:
1272 case AArch64::LDRSroX:
1273 case AArch64::LDRWroX:
1274 case AArch64::LDRXroX:
1275 case AArch64::STRBBroX:
1276 case AArch64::STRBroX:
1277 case AArch64::STRDroX:
1278 case AArch64::STRHHroX:
1279 case AArch64::STRHroX:
1280 case AArch64::STRQroX:
1281 case AArch64::STRSroX:
1282 case AArch64::STRWroX:
1283 case AArch64::STRXroX:
1284
1285 unsigned Val = MI->getOperand(3).getImm();
1286 AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
1287 return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
1288 }
1289 return false;
1290}
1291
1292/// Check all MachineMemOperands for a hint to suppress pairing.
1293bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
1294 assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
1295 "Too many target MO flags");
1296 for (auto *MM : MI->memoperands()) {
1297 if (MM->getFlags() &
1298 (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
1299 return true;
1300 }
1301 }
1302 return false;
1303}
1304
1305/// Set a flag on the first MachineMemOperand to suppress pairing.
1306void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
1307 if (MI->memoperands_empty())
1308 return;
1309
1310 assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
1311 "Too many target MO flags");
1312 (*MI->memoperands_begin())
1313 ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
1314}
1315
Chad Rosiere4e15ba2016-03-09 17:29:48 +00001316bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
1317 switch (Opc) {
1318 default:
1319 return false;
1320 case AArch64::STURSi:
1321 case AArch64::STURDi:
1322 case AArch64::STURQi:
1323 case AArch64::STURBBi:
1324 case AArch64::STURHHi:
1325 case AArch64::STURWi:
1326 case AArch64::STURXi:
1327 case AArch64::LDURSi:
1328 case AArch64::LDURDi:
1329 case AArch64::LDURQi:
1330 case AArch64::LDURWi:
1331 case AArch64::LDURXi:
1332 case AArch64::LDURSWi:
1333 case AArch64::LDURHHi:
1334 case AArch64::LDURBBi:
1335 case AArch64::LDURSBWi:
1336 case AArch64::LDURSHWi:
1337 return true;
1338 }
1339}
1340
1341bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr *MI) const {
1342 return isUnscaledLdSt(MI->getOpcode());
1343}
1344
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001345// Is this a candidate for ld/st merging or pairing? For example, we don't
1346// touch volatiles or load/stores that have a hint to avoid pair formation.
1347bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr *MI) const {
1348 // If this is a volatile load/store, don't mess with it.
1349 if (MI->hasOrderedMemoryRef())
1350 return false;
1351
1352 // Make sure this is a reg+imm (as opposed to an address reloc).
1353 assert(MI->getOperand(1).isReg() && "Expected a reg operand.");
1354 if (!MI->getOperand(2).isImm())
1355 return false;
1356
1357 // Can't merge/pair if the instruction modifies the base register.
1358 // e.g., ldr x0, [x0]
1359 unsigned BaseReg = MI->getOperand(1).getReg();
1360 const TargetRegisterInfo *TRI = &getRegisterInfo();
1361 if (MI->modifiesRegister(BaseReg, TRI))
1362 return false;
1363
1364 // Check if this load/store has a hint to avoid pair formation.
1365 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1366 if (isLdStPairSuppressed(MI))
1367 return false;
1368
1369 return true;
1370}
1371
Chad Rosierc27a18f2016-03-09 16:00:35 +00001372bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
1373 MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
1374 const TargetRegisterInfo *TRI) const {
Tim Northover3b0846e2014-05-24 12:50:23 +00001375 switch (LdSt->getOpcode()) {
1376 default:
1377 return false;
Chad Rosier0da267d2016-03-09 16:46:48 +00001378 // Scaled instructions.
Tim Northover3b0846e2014-05-24 12:50:23 +00001379 case AArch64::STRSui:
1380 case AArch64::STRDui:
1381 case AArch64::STRQui:
1382 case AArch64::STRXui:
1383 case AArch64::STRWui:
1384 case AArch64::LDRSui:
1385 case AArch64::LDRDui:
1386 case AArch64::LDRQui:
1387 case AArch64::LDRXui:
1388 case AArch64::LDRWui:
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001389 case AArch64::LDRSWui:
1390 // Unscaled instructions.
1391 case AArch64::LDURSi:
1392 case AArch64::LDURDi:
1393 case AArch64::LDURQi:
1394 case AArch64::LDURWi:
1395 case AArch64::LDURXi:
1396 case AArch64::LDURSWi:
Chad Rosier0da267d2016-03-09 16:46:48 +00001397 unsigned Width;
1398 return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001399 };
1400}
1401
Sanjoy Dasb666ea32015-06-15 18:44:14 +00001402bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
Chad Rosier0da267d2016-03-09 16:46:48 +00001403 MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
Chad Rosier3528c1e2014-09-08 14:43:48 +00001404 const TargetRegisterInfo *TRI) const {
Chad Rosiercf173ff2016-03-21 18:04:10 +00001405 assert(LdSt->mayLoadOrStore() && "Expected a memory operation.");
Chad Rosier3528c1e2014-09-08 14:43:48 +00001406 // Handle only loads/stores with base register followed by immediate offset.
1407 if (LdSt->getNumOperands() != 3)
1408 return false;
1409 if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
1410 return false;
1411
1412 // Offset is calculated as the immediate operand multiplied by the scaling factor.
1413 // Unscaled instructions have scaling factor set to 1.
Chad Rosier0da267d2016-03-09 16:46:48 +00001414 unsigned Scale = 0;
Chad Rosier3528c1e2014-09-08 14:43:48 +00001415 switch (LdSt->getOpcode()) {
1416 default:
1417 return false;
1418 case AArch64::LDURQi:
1419 case AArch64::STURQi:
1420 Width = 16;
1421 Scale = 1;
1422 break;
1423 case AArch64::LDURXi:
1424 case AArch64::LDURDi:
1425 case AArch64::STURXi:
1426 case AArch64::STURDi:
1427 Width = 8;
1428 Scale = 1;
1429 break;
1430 case AArch64::LDURWi:
1431 case AArch64::LDURSi:
1432 case AArch64::LDURSWi:
1433 case AArch64::STURWi:
1434 case AArch64::STURSi:
1435 Width = 4;
1436 Scale = 1;
1437 break;
1438 case AArch64::LDURHi:
1439 case AArch64::LDURHHi:
1440 case AArch64::LDURSHXi:
1441 case AArch64::LDURSHWi:
1442 case AArch64::STURHi:
1443 case AArch64::STURHHi:
1444 Width = 2;
1445 Scale = 1;
1446 break;
1447 case AArch64::LDURBi:
1448 case AArch64::LDURBBi:
1449 case AArch64::LDURSBXi:
1450 case AArch64::LDURSBWi:
1451 case AArch64::STURBi:
1452 case AArch64::STURBBi:
1453 Width = 1;
1454 Scale = 1;
1455 break;
Chad Rosierd90e2eb2015-09-18 14:15:19 +00001456 case AArch64::LDRQui:
1457 case AArch64::STRQui:
1458 Scale = Width = 16;
1459 break;
Chad Rosier3528c1e2014-09-08 14:43:48 +00001460 case AArch64::LDRXui:
Chad Rosier84a0afd2015-09-18 14:13:18 +00001461 case AArch64::LDRDui:
Chad Rosier3528c1e2014-09-08 14:43:48 +00001462 case AArch64::STRXui:
Chad Rosier84a0afd2015-09-18 14:13:18 +00001463 case AArch64::STRDui:
Chad Rosier3528c1e2014-09-08 14:43:48 +00001464 Scale = Width = 8;
1465 break;
1466 case AArch64::LDRWui:
Chad Rosier3528c1e2014-09-08 14:43:48 +00001467 case AArch64::LDRSui:
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001468 case AArch64::LDRSWui:
Chad Rosier84a0afd2015-09-18 14:13:18 +00001469 case AArch64::STRWui:
Chad Rosier3528c1e2014-09-08 14:43:48 +00001470 case AArch64::STRSui:
1471 Scale = Width = 4;
1472 break;
Chad Rosier84a0afd2015-09-18 14:13:18 +00001473 case AArch64::LDRHui:
1474 case AArch64::LDRHHui:
1475 case AArch64::STRHui:
1476 case AArch64::STRHHui:
1477 Scale = Width = 2;
Chad Rosier3528c1e2014-09-08 14:43:48 +00001478 break;
Chad Rosierd90e2eb2015-09-18 14:15:19 +00001479 case AArch64::LDRBui:
1480 case AArch64::LDRBBui:
1481 case AArch64::STRBui:
1482 case AArch64::STRBBui:
1483 Scale = Width = 1;
Chad Rosier3528c1e2014-09-08 14:43:48 +00001484 break;
Chad Rosier064261d2016-02-01 20:54:36 +00001485 }
Chad Rosier3528c1e2014-09-08 14:43:48 +00001486
1487 BaseReg = LdSt->getOperand(1).getReg();
1488 Offset = LdSt->getOperand(2).getImm() * Scale;
1489 return true;
1490}
1491
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001492// Scale the unscaled offsets. Returns false if the unscaled offset can't be
1493// scaled.
1494static bool scaleOffset(unsigned Opc, int64_t &Offset) {
1495 unsigned OffsetStride = 1;
1496 switch (Opc) {
1497 default:
1498 return false;
1499 case AArch64::LDURQi:
1500 OffsetStride = 16;
1501 break;
1502 case AArch64::LDURXi:
1503 case AArch64::LDURDi:
1504 OffsetStride = 8;
1505 break;
1506 case AArch64::LDURWi:
1507 case AArch64::LDURSi:
1508 case AArch64::LDURSWi:
1509 OffsetStride = 4;
1510 break;
1511 }
1512 // If the byte-offset isn't a multiple of the stride, we can't scale this
1513 // offset.
1514 if (Offset % OffsetStride != 0)
1515 return false;
1516
1517 // Convert the byte-offset used by unscaled into an "element" offset used
1518 // by the scaled pair load/store instructions.
1519 Offset /= OffsetStride;
1520 return true;
1521}
1522
1523static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
1524 if (FirstOpc == SecondOpc)
1525 return true;
1526 // We can also pair sign-ext and zero-ext instructions.
1527 switch (FirstOpc) {
1528 default:
1529 return false;
1530 case AArch64::LDRWui:
1531 case AArch64::LDURWi:
1532 return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
1533 case AArch64::LDRSWui:
1534 case AArch64::LDURSWi:
1535 return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
1536 }
1537 // These instructions can't be paired based on their opcodes.
1538 return false;
1539}
1540
Tim Northover3b0846e2014-05-24 12:50:23 +00001541/// Detect opportunities for ldp/stp formation.
1542///
Sanjoy Dasb666ea32015-06-15 18:44:14 +00001543/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
Tim Northover3b0846e2014-05-24 12:50:23 +00001544bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
1545 MachineInstr *SecondLdSt,
1546 unsigned NumLoads) const {
1547 // Only cluster up to a single pair.
1548 if (NumLoads > 1)
1549 return false;
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001550
1551 // Can we pair these instructions based on their opcodes?
1552 unsigned FirstOpc = FirstLdSt->getOpcode();
1553 unsigned SecondOpc = SecondLdSt->getOpcode();
1554 if (!canPairLdStOpc(FirstOpc, SecondOpc))
Tim Northover3b0846e2014-05-24 12:50:23 +00001555 return false;
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001556
1557 // Can't merge volatiles or load/stores that have a hint to avoid pair
1558 // formation, for example.
1559 if (!isCandidateToMergeOrPair(FirstLdSt) ||
1560 !isCandidateToMergeOrPair(SecondLdSt))
Tim Northover3b0846e2014-05-24 12:50:23 +00001561 return false;
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001562
1563 // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
1564 int64_t Offset1 = FirstLdSt->getOperand(2).getImm();
1565 if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
1566 return false;
1567
1568 int64_t Offset2 = SecondLdSt->getOperand(2).getImm();
1569 if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
1570 return false;
1571
1572 // Pairwise instructions have a 7-bit signed offset field.
1573 if (Offset1 > 63 || Offset1 < -64)
1574 return false;
1575
Tim Northover3b0846e2014-05-24 12:50:23 +00001576 // The caller should already have ordered First/SecondLdSt by offset.
Chad Rosiercdfd7e72016-03-18 19:21:02 +00001577 assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
1578 return Offset1 + 1 == Offset2;
Tim Northover3b0846e2014-05-24 12:50:23 +00001579}
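// Worked example (illustrative): assuming NumLoads == 1 and both instructions
// pass isCandidateToMergeOrPair, two LDRXui loads off the same base register
// with immediates 2 and 3 are clustered (a candidate for LDP formation),
// while immediates 2 and 4, or a first offset outside [-64, 63], are not.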
1580
1581bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
1582 MachineInstr *Second) const {
Matthias Braunc8b67e62015-07-20 23:11:42 +00001583 if (Subtarget.isCyclone()) {
1584 // Cyclone can fuse CMN, CMP, TST followed by Bcc.
1585 unsigned SecondOpcode = Second->getOpcode();
1586 if (SecondOpcode == AArch64::Bcc) {
1587 switch (First->getOpcode()) {
1588 default:
1589 return false;
1590 case AArch64::SUBSWri:
1591 case AArch64::ADDSWri:
1592 case AArch64::ANDSWri:
1593 case AArch64::SUBSXri:
1594 case AArch64::ADDSXri:
1595 case AArch64::ANDSXri:
1596 return true;
1597 }
Matthias Braune536f4f2015-07-20 22:34:47 +00001598 }
Matthias Braunc8b67e62015-07-20 23:11:42 +00001599 // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
1600 if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
1601 SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
1602 switch (First->getOpcode()) {
1603 default:
1604 return false;
1605 case AArch64::ADDWri:
1606 case AArch64::ADDXri:
1607 case AArch64::ANDWri:
1608 case AArch64::ANDXri:
1609 case AArch64::EORWri:
1610 case AArch64::EORXri:
1611 case AArch64::ORRWri:
1612 case AArch64::ORRXri:
1613 case AArch64::SUBWri:
1614 case AArch64::SUBXri:
1615 return true;
1616 }
Matthias Braune536f4f2015-07-20 22:34:47 +00001617 }
1618 }
1619 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00001620}
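// Worked example (illustrative): on Cyclone a SUBSWri (a CMP against an
// immediate) followed by a Bcc is reported as a fusion pair, as is an ADDXri
// followed by a CBNZX; unrelated pairs such as FADDDrr + Bcc are not.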
1621
Adrian Prantl87b7eb92014-10-01 18:55:02 +00001622MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
1623 MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
1624 const MDNode *Expr, DebugLoc DL) const {
Tim Northover3b0846e2014-05-24 12:50:23 +00001625 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
1626 .addFrameIndex(FrameIx)
1627 .addImm(0)
1628 .addImm(Offset)
Adrian Prantl87b7eb92014-10-01 18:55:02 +00001629 .addMetadata(Var)
1630 .addMetadata(Expr);
Tim Northover3b0846e2014-05-24 12:50:23 +00001631 return &*MIB;
1632}
1633
1634static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
1635 unsigned Reg, unsigned SubIdx,
1636 unsigned State,
1637 const TargetRegisterInfo *TRI) {
1638 if (!SubIdx)
1639 return MIB.addReg(Reg, State);
1640
1641 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1642 return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
1643 return MIB.addReg(Reg, State, SubIdx);
1644}
1645
1646static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
1647 unsigned NumRegs) {
1648  // We really want the positive remainder mod 32 here; that happens to be
1649 // easily obtainable with a mask.
1650 return ((DestReg - SrcReg) & 0x1f) < NumRegs;
1651}
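// Worked example (illustrative): with a DestReg encoding of 2, a SrcReg
// encoding of 1 and NumRegs == 3, (2 - 1) & 0x1f == 1 is less than 3, so a
// forward copy would clobber a source register that is still needed;
// copyPhysRegTuple below therefore copies the sub-registers in reverse order.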
1652
1653void AArch64InstrInfo::copyPhysRegTuple(
1654 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
1655 unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
1656 llvm::ArrayRef<unsigned> Indices) const {
Eric Christopher58f32662014-06-10 22:57:21 +00001657 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001658 "Unexpected register copy without NEON");
Eric Christophera0de2532015-03-18 20:37:30 +00001659 const TargetRegisterInfo *TRI = &getRegisterInfo();
Tim Northover3b0846e2014-05-24 12:50:23 +00001660 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
1661 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
1662 unsigned NumRegs = Indices.size();
1663
1664 int SubReg = 0, End = NumRegs, Incr = 1;
1665 if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
1666 SubReg = NumRegs - 1;
1667 End = -1;
1668 Incr = -1;
1669 }
1670
1671 for (; SubReg != End; SubReg += Incr) {
James Molloyf8aa57a2015-04-16 11:37:40 +00001672 const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
Tim Northover3b0846e2014-05-24 12:50:23 +00001673 AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
1674 AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
1675 AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
1676 }
1677}
1678
1679void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
1680 MachineBasicBlock::iterator I, DebugLoc DL,
1681 unsigned DestReg, unsigned SrcReg,
1682 bool KillSrc) const {
1683 if (AArch64::GPR32spRegClass.contains(DestReg) &&
1684 (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
Eric Christophera0de2532015-03-18 20:37:30 +00001685 const TargetRegisterInfo *TRI = &getRegisterInfo();
1686
Tim Northover3b0846e2014-05-24 12:50:23 +00001687 if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
1688 // If either operand is WSP, expand to ADD #0.
1689 if (Subtarget.hasZeroCycleRegMove()) {
1690 // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
1691 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
1692 &AArch64::GPR64spRegClass);
1693 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
1694 &AArch64::GPR64spRegClass);
1695 // This instruction is reading and writing X registers. This may upset
1696 // the register scavenger and machine verifier, so we need to indicate
1697 // that we are reading an undefined value from SrcRegX, but a proper
1698 // value from SrcReg.
1699 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
1700 .addReg(SrcRegX, RegState::Undef)
1701 .addImm(0)
1702 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
1703 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
1704 } else {
1705 BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
1706 .addReg(SrcReg, getKillRegState(KillSrc))
1707 .addImm(0)
1708 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1709 }
1710 } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
1711 BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
1712 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1713 } else {
1714 if (Subtarget.hasZeroCycleRegMove()) {
1715 // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
1716 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
1717 &AArch64::GPR64spRegClass);
1718 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
1719 &AArch64::GPR64spRegClass);
1720 // This instruction is reading and writing X registers. This may upset
1721 // the register scavenger and machine verifier, so we need to indicate
1722 // that we are reading an undefined value from SrcRegX, but a proper
1723 // value from SrcReg.
1724 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
1725 .addReg(AArch64::XZR)
1726 .addReg(SrcRegX, RegState::Undef)
1727 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
1728 } else {
1729 // Otherwise, expand to ORR WZR.
1730 BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
1731 .addReg(AArch64::WZR)
1732 .addReg(SrcReg, getKillRegState(KillSrc));
1733 }
1734 }
1735 return;
1736 }
1737
1738 if (AArch64::GPR64spRegClass.contains(DestReg) &&
1739 (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
1740 if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
1741 // If either operand is SP, expand to ADD #0.
1742 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
1743 .addReg(SrcReg, getKillRegState(KillSrc))
1744 .addImm(0)
1745 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1746 } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
1747 BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
1748 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1749 } else {
1750 // Otherwise, expand to ORR XZR.
1751 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
1752 .addReg(AArch64::XZR)
1753 .addReg(SrcReg, getKillRegState(KillSrc));
1754 }
1755 return;
1756 }
1757
1758 // Copy a DDDD register quad by copying the individual sub-registers.
1759 if (AArch64::DDDDRegClass.contains(DestReg) &&
1760 AArch64::DDDDRegClass.contains(SrcReg)) {
1761 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
1762 AArch64::dsub2, AArch64::dsub3 };
1763 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
1764 Indices);
1765 return;
1766 }
1767
1768 // Copy a DDD register triple by copying the individual sub-registers.
1769 if (AArch64::DDDRegClass.contains(DestReg) &&
1770 AArch64::DDDRegClass.contains(SrcReg)) {
1771 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
1772 AArch64::dsub2 };
1773 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
1774 Indices);
1775 return;
1776 }
1777
1778 // Copy a DD register pair by copying the individual sub-registers.
1779 if (AArch64::DDRegClass.contains(DestReg) &&
1780 AArch64::DDRegClass.contains(SrcReg)) {
1781 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
1782 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
1783 Indices);
1784 return;
1785 }
1786
1787 // Copy a QQQQ register quad by copying the individual sub-registers.
1788 if (AArch64::QQQQRegClass.contains(DestReg) &&
1789 AArch64::QQQQRegClass.contains(SrcReg)) {
1790 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
1791 AArch64::qsub2, AArch64::qsub3 };
1792 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
1793 Indices);
1794 return;
1795 }
1796
1797 // Copy a QQQ register triple by copying the individual sub-registers.
1798 if (AArch64::QQQRegClass.contains(DestReg) &&
1799 AArch64::QQQRegClass.contains(SrcReg)) {
1800 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
1801 AArch64::qsub2 };
1802 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
1803 Indices);
1804 return;
1805 }
1806
1807 // Copy a QQ register pair by copying the individual sub-registers.
1808 if (AArch64::QQRegClass.contains(DestReg) &&
1809 AArch64::QQRegClass.contains(SrcReg)) {
1810 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
1811 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
1812 Indices);
1813 return;
1814 }
1815
1816 if (AArch64::FPR128RegClass.contains(DestReg) &&
1817 AArch64::FPR128RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001818    if (Subtarget.hasNEON()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001819 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1820 .addReg(SrcReg)
1821 .addReg(SrcReg, getKillRegState(KillSrc));
1822 } else {
1823 BuildMI(MBB, I, DL, get(AArch64::STRQpre))
1824 .addReg(AArch64::SP, RegState::Define)
1825 .addReg(SrcReg, getKillRegState(KillSrc))
1826 .addReg(AArch64::SP)
1827 .addImm(-16);
1828 BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
1829 .addReg(AArch64::SP, RegState::Define)
1830 .addReg(DestReg, RegState::Define)
1831 .addReg(AArch64::SP)
1832 .addImm(16);
1833 }
1834 return;
1835 }
1836
1837 if (AArch64::FPR64RegClass.contains(DestReg) &&
1838 AArch64::FPR64RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001839    if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001840 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
1841 &AArch64::FPR128RegClass);
1842 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
1843 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001844 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1845 .addReg(SrcReg)
1846 .addReg(SrcReg, getKillRegState(KillSrc));
1847 } else {
1848 BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
1849 .addReg(SrcReg, getKillRegState(KillSrc));
1850 }
1851 return;
1852 }
1853
1854 if (AArch64::FPR32RegClass.contains(DestReg) &&
1855 AArch64::FPR32RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001856    if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001857 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
1858 &AArch64::FPR128RegClass);
1859 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
1860 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001861 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1862 .addReg(SrcReg)
1863 .addReg(SrcReg, getKillRegState(KillSrc));
1864 } else {
1865 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
1866 .addReg(SrcReg, getKillRegState(KillSrc));
1867 }
1868 return;
1869 }
1870
1871 if (AArch64::FPR16RegClass.contains(DestReg) &&
1872 AArch64::FPR16RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001873    if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001874 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
1875 &AArch64::FPR128RegClass);
1876 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
1877 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001878 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1879 .addReg(SrcReg)
1880 .addReg(SrcReg, getKillRegState(KillSrc));
1881 } else {
Eric Christophera0de2532015-03-18 20:37:30 +00001882 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
1883 &AArch64::FPR32RegClass);
1884 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
1885 &AArch64::FPR32RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001886 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
1887 .addReg(SrcReg, getKillRegState(KillSrc));
1888 }
1889 return;
1890 }
1891
1892 if (AArch64::FPR8RegClass.contains(DestReg) &&
1893 AArch64::FPR8RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001894    if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001895 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
Tim Northover3b0846e2014-05-24 12:50:23 +00001896 &AArch64::FPR128RegClass);
Eric Christophera0de2532015-03-18 20:37:30 +00001897 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
1898 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001899 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1900 .addReg(SrcReg)
1901 .addReg(SrcReg, getKillRegState(KillSrc));
1902 } else {
Eric Christophera0de2532015-03-18 20:37:30 +00001903 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
1904 &AArch64::FPR32RegClass);
1905 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
1906 &AArch64::FPR32RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001907 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
1908 .addReg(SrcReg, getKillRegState(KillSrc));
1909 }
1910 return;
1911 }
1912
1913 // Copies between GPR64 and FPR64.
1914 if (AArch64::FPR64RegClass.contains(DestReg) &&
1915 AArch64::GPR64RegClass.contains(SrcReg)) {
1916 BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
1917 .addReg(SrcReg, getKillRegState(KillSrc));
1918 return;
1919 }
1920 if (AArch64::GPR64RegClass.contains(DestReg) &&
1921 AArch64::FPR64RegClass.contains(SrcReg)) {
1922 BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
1923 .addReg(SrcReg, getKillRegState(KillSrc));
1924 return;
1925 }
1926 // Copies between GPR32 and FPR32.
1927 if (AArch64::FPR32RegClass.contains(DestReg) &&
1928 AArch64::GPR32RegClass.contains(SrcReg)) {
1929 BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
1930 .addReg(SrcReg, getKillRegState(KillSrc));
1931 return;
1932 }
1933 if (AArch64::GPR32RegClass.contains(DestReg) &&
1934 AArch64::FPR32RegClass.contains(SrcReg)) {
1935 BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
1936 .addReg(SrcReg, getKillRegState(KillSrc));
1937 return;
1938 }
1939
Tim Northover1bed9af2014-05-27 12:16:02 +00001940 if (DestReg == AArch64::NZCV) {
1941 assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
1942 BuildMI(MBB, I, DL, get(AArch64::MSR))
1943 .addImm(AArch64SysReg::NZCV)
1944 .addReg(SrcReg, getKillRegState(KillSrc))
1945 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
1946 return;
1947 }
1948
1949 if (SrcReg == AArch64::NZCV) {
1950 assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
1951 BuildMI(MBB, I, DL, get(AArch64::MRS))
1952 .addReg(DestReg)
1953 .addImm(AArch64SysReg::NZCV)
1954 .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
1955 return;
1956 }
1957
1958 llvm_unreachable("unimplemented reg-to-reg copy");
Tim Northover3b0846e2014-05-24 12:50:23 +00001959}
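// Illustrative summary of the dispatch above: a plain W-to-W register copy is
// expanded to ORRWrr Wd, WZR, Ws (or to an X-register ORR/ADD form on
// subtargets with zero-cycle register moves), FPR128 copies use ORRv16i8 when
// NEON is available, and copies to or from NZCV become MSR/MRS of the NZCV
// system register.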
1960
1961void AArch64InstrInfo::storeRegToStackSlot(
1962 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
1963 bool isKill, int FI, const TargetRegisterClass *RC,
1964 const TargetRegisterInfo *TRI) const {
1965 DebugLoc DL;
1966 if (MBBI != MBB.end())
1967 DL = MBBI->getDebugLoc();
1968 MachineFunction &MF = *MBB.getParent();
1969 MachineFrameInfo &MFI = *MF.getFrameInfo();
1970 unsigned Align = MFI.getObjectAlignment(FI);
1971
Alex Lorenze40c8a22015-08-11 23:09:45 +00001972 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
Tim Northover3b0846e2014-05-24 12:50:23 +00001973 MachineMemOperand *MMO = MF.getMachineMemOperand(
1974 PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
1975 unsigned Opc = 0;
1976 bool Offset = true;
1977 switch (RC->getSize()) {
1978 case 1:
1979 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
1980 Opc = AArch64::STRBui;
1981 break;
1982 case 2:
1983 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
1984 Opc = AArch64::STRHui;
1985 break;
1986 case 4:
1987 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
1988 Opc = AArch64::STRWui;
1989 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
1990 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
1991 else
1992 assert(SrcReg != AArch64::WSP);
1993 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
1994 Opc = AArch64::STRSui;
1995 break;
1996 case 8:
1997 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
1998 Opc = AArch64::STRXui;
1999 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2000 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2001 else
2002 assert(SrcReg != AArch64::SP);
2003 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2004 Opc = AArch64::STRDui;
2005 break;
2006 case 16:
2007 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2008 Opc = AArch64::STRQui;
2009 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002010 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002011 "Unexpected register store without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002012 Opc = AArch64::ST1Twov1d;
2013 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002014 }
2015 break;
2016 case 24:
2017 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002018 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002019 "Unexpected register store without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002020 Opc = AArch64::ST1Threev1d;
2021 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002022 }
2023 break;
2024 case 32:
2025 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002026 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002027 "Unexpected register store without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002028 Opc = AArch64::ST1Fourv1d;
2029 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002030 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002031 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002032 "Unexpected register store without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002033 Opc = AArch64::ST1Twov2d;
2034 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002035 }
2036 break;
2037 case 48:
2038 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002039 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002040 "Unexpected register store without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002041 Opc = AArch64::ST1Threev2d;
2042 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002043 }
2044 break;
2045 case 64:
2046 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002047 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002048 "Unexpected register store without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002049 Opc = AArch64::ST1Fourv2d;
2050 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002051 }
2052 break;
2053 }
2054 assert(Opc && "Unknown register class");
2055
James Molloyf8aa57a2015-04-16 11:37:40 +00002056 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
Tim Northover3b0846e2014-05-24 12:50:23 +00002057 .addReg(SrcReg, getKillRegState(isKill))
2058 .addFrameIndex(FI);
2059
2060 if (Offset)
2061 MI.addImm(0);
2062 MI.addMemOperand(MMO);
2063}
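// Worked example (illustrative): spilling a GPR64 register selects STRXui and
// appends an immediate of 0, an FPR128 register selects STRQui, and a 32-byte
// QQ tuple selects ST1Twov2d, which takes no immediate (Offset == false).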
2064
2065void AArch64InstrInfo::loadRegFromStackSlot(
2066 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
2067 int FI, const TargetRegisterClass *RC,
2068 const TargetRegisterInfo *TRI) const {
2069 DebugLoc DL;
2070 if (MBBI != MBB.end())
2071 DL = MBBI->getDebugLoc();
2072 MachineFunction &MF = *MBB.getParent();
2073 MachineFrameInfo &MFI = *MF.getFrameInfo();
2074 unsigned Align = MFI.getObjectAlignment(FI);
Alex Lorenze40c8a22015-08-11 23:09:45 +00002075 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
Tim Northover3b0846e2014-05-24 12:50:23 +00002076 MachineMemOperand *MMO = MF.getMachineMemOperand(
2077 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
2078
2079 unsigned Opc = 0;
2080 bool Offset = true;
2081 switch (RC->getSize()) {
2082 case 1:
2083 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2084 Opc = AArch64::LDRBui;
2085 break;
2086 case 2:
2087 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2088 Opc = AArch64::LDRHui;
2089 break;
2090 case 4:
2091 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2092 Opc = AArch64::LDRWui;
2093 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2094 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
2095 else
2096 assert(DestReg != AArch64::WSP);
2097 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2098 Opc = AArch64::LDRSui;
2099 break;
2100 case 8:
2101 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2102 Opc = AArch64::LDRXui;
2103 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2104 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
2105 else
2106 assert(DestReg != AArch64::SP);
2107 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2108 Opc = AArch64::LDRDui;
2109 break;
2110 case 16:
2111 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2112 Opc = AArch64::LDRQui;
2113 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002114 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002115 "Unexpected register load without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002116 Opc = AArch64::LD1Twov1d;
2117 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002118 }
2119 break;
2120 case 24:
2121 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002122 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002123 "Unexpected register load without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002124 Opc = AArch64::LD1Threev1d;
2125 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002126 }
2127 break;
2128 case 32:
2129 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002130 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002131 "Unexpected register load without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002132 Opc = AArch64::LD1Fourv1d;
2133 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002134 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002135 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002136 "Unexpected register load without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002137 Opc = AArch64::LD1Twov2d;
2138 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002139 }
2140 break;
2141 case 48:
2142 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002143 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002144 "Unexpected register load without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002145 Opc = AArch64::LD1Threev2d;
2146 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002147 }
2148 break;
2149 case 64:
2150 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00002151 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00002152 "Unexpected register load without NEON");
Richard Trieu7a083812016-02-18 22:09:30 +00002153 Opc = AArch64::LD1Fourv2d;
2154 Offset = false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002155 }
2156 break;
2157 }
2158 assert(Opc && "Unknown register class");
2159
James Molloyf8aa57a2015-04-16 11:37:40 +00002160 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
Tim Northover3b0846e2014-05-24 12:50:23 +00002161 .addReg(DestReg, getDefRegState(true))
2162 .addFrameIndex(FI);
2163 if (Offset)
2164 MI.addImm(0);
2165 MI.addMemOperand(MMO);
2166}
2167
2168void llvm::emitFrameOffset(MachineBasicBlock &MBB,
2169 MachineBasicBlock::iterator MBBI, DebugLoc DL,
2170 unsigned DestReg, unsigned SrcReg, int Offset,
Eric Christopherbc76b972014-06-10 17:33:39 +00002171 const TargetInstrInfo *TII,
Tim Northover3b0846e2014-05-24 12:50:23 +00002172 MachineInstr::MIFlag Flag, bool SetNZCV) {
2173 if (DestReg == SrcReg && Offset == 0)
2174 return;
2175
2176 bool isSub = Offset < 0;
2177 if (isSub)
2178 Offset = -Offset;
2179
2180 // FIXME: If the offset won't fit in 24-bits, compute the offset into a
2181 // scratch register. If DestReg is a virtual register, use it as the
2182 // scratch register; otherwise, create a new virtual register (to be
2183 // replaced by the scavenger at the end of PEI). That case can be optimized
2184 // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
2185 // register can be loaded with offset%8 and the add/sub can use an extending
2186 // instruction with LSL#3.
2187 // Currently the function handles any offsets but generates a poor sequence
2188 // of code.
2189 // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
2190
2191 unsigned Opc;
2192 if (SetNZCV)
2193 Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
2194 else
2195 Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
2196 const unsigned MaxEncoding = 0xfff;
2197 const unsigned ShiftSize = 12;
2198 const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
2199 while (((unsigned)Offset) >= (1 << ShiftSize)) {
2200 unsigned ThisVal;
2201 if (((unsigned)Offset) > MaxEncodableValue) {
2202 ThisVal = MaxEncodableValue;
2203 } else {
2204 ThisVal = Offset & MaxEncodableValue;
2205 }
2206 assert((ThisVal >> ShiftSize) <= MaxEncoding &&
2207 "Encoding cannot handle value that big");
2208 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2209 .addReg(SrcReg)
2210 .addImm(ThisVal >> ShiftSize)
2211 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
2212 .setMIFlag(Flag);
2213
2214 SrcReg = DestReg;
2215 Offset -= ThisVal;
2216 if (Offset == 0)
2217 return;
2218 }
2219 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2220 .addReg(SrcReg)
2221 .addImm(Offset)
2222 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2223 .setMIFlag(Flag);
2224}
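// Worked example (illustrative): a call with Offset = 0x1234 first emits
// ADD Xd, Xn, #1, LSL #12 to cover 0x1000 and then ADD Xd, Xd, #0x234 for the
// remainder; negative offsets use the SUB forms, and SetNZCV selects the
// flag-setting ADDS/SUBS variants.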
2225
Keno Fischere70b31f2015-06-08 20:09:58 +00002226MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
2227 MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
2228 MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
Tim Northover3b0846e2014-05-24 12:50:23 +00002229 // This is a bit of a hack. Consider this instruction:
2230 //
2231 // %vreg0<def> = COPY %SP; GPR64all:%vreg0
2232 //
2233 // We explicitly chose GPR64all for the virtual register so such a copy might
2234 // be eliminated by RegisterCoalescer. However, that may not be possible, and
2235 // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
2236 // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
2237 //
2238 // To prevent that, we are going to constrain the %vreg0 register class here.
2239 //
2240 // <rdar://problem/11522048>
2241 //
2242 if (MI->isCopy()) {
2243 unsigned DstReg = MI->getOperand(0).getReg();
2244 unsigned SrcReg = MI->getOperand(1).getReg();
2245 if (SrcReg == AArch64::SP &&
2246 TargetRegisterInfo::isVirtualRegister(DstReg)) {
2247 MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
2248 return nullptr;
2249 }
2250 if (DstReg == AArch64::SP &&
2251 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
2252 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2253 return nullptr;
2254 }
2255 }
2256
2257 // Cannot fold.
2258 return nullptr;
2259}
2260
2261int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
2262 bool *OutUseUnscaledOp,
2263 unsigned *OutUnscaledOp,
2264 int *EmittableOffset) {
2265 int Scale = 1;
2266 bool IsSigned = false;
2267 // The ImmIdx should be changed case by case if it is not 2.
2268 unsigned ImmIdx = 2;
2269 unsigned UnscaledOp = 0;
2270 // Set output values in case of early exit.
2271 if (EmittableOffset)
2272 *EmittableOffset = 0;
2273 if (OutUseUnscaledOp)
2274 *OutUseUnscaledOp = false;
2275 if (OutUnscaledOp)
2276 *OutUnscaledOp = 0;
2277 switch (MI.getOpcode()) {
2278 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002279 llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
Tim Northover3b0846e2014-05-24 12:50:23 +00002280 // Vector spills/fills can't take an immediate offset.
2281 case AArch64::LD1Twov2d:
2282 case AArch64::LD1Threev2d:
2283 case AArch64::LD1Fourv2d:
2284 case AArch64::LD1Twov1d:
2285 case AArch64::LD1Threev1d:
2286 case AArch64::LD1Fourv1d:
2287 case AArch64::ST1Twov2d:
2288 case AArch64::ST1Threev2d:
2289 case AArch64::ST1Fourv2d:
2290 case AArch64::ST1Twov1d:
2291 case AArch64::ST1Threev1d:
2292 case AArch64::ST1Fourv1d:
2293 return AArch64FrameOffsetCannotUpdate;
2294 case AArch64::PRFMui:
2295 Scale = 8;
2296 UnscaledOp = AArch64::PRFUMi;
2297 break;
2298 case AArch64::LDRXui:
2299 Scale = 8;
2300 UnscaledOp = AArch64::LDURXi;
2301 break;
2302 case AArch64::LDRWui:
2303 Scale = 4;
2304 UnscaledOp = AArch64::LDURWi;
2305 break;
2306 case AArch64::LDRBui:
2307 Scale = 1;
2308 UnscaledOp = AArch64::LDURBi;
2309 break;
2310 case AArch64::LDRHui:
2311 Scale = 2;
2312 UnscaledOp = AArch64::LDURHi;
2313 break;
2314 case AArch64::LDRSui:
2315 Scale = 4;
2316 UnscaledOp = AArch64::LDURSi;
2317 break;
2318 case AArch64::LDRDui:
2319 Scale = 8;
2320 UnscaledOp = AArch64::LDURDi;
2321 break;
2322 case AArch64::LDRQui:
2323 Scale = 16;
2324 UnscaledOp = AArch64::LDURQi;
2325 break;
2326 case AArch64::LDRBBui:
2327 Scale = 1;
2328 UnscaledOp = AArch64::LDURBBi;
2329 break;
2330 case AArch64::LDRHHui:
2331 Scale = 2;
2332 UnscaledOp = AArch64::LDURHHi;
2333 break;
2334 case AArch64::LDRSBXui:
2335 Scale = 1;
2336 UnscaledOp = AArch64::LDURSBXi;
2337 break;
2338 case AArch64::LDRSBWui:
2339 Scale = 1;
2340 UnscaledOp = AArch64::LDURSBWi;
2341 break;
2342 case AArch64::LDRSHXui:
2343 Scale = 2;
2344 UnscaledOp = AArch64::LDURSHXi;
2345 break;
2346 case AArch64::LDRSHWui:
2347 Scale = 2;
2348 UnscaledOp = AArch64::LDURSHWi;
2349 break;
2350 case AArch64::LDRSWui:
2351 Scale = 4;
2352 UnscaledOp = AArch64::LDURSWi;
2353 break;
2354
2355 case AArch64::STRXui:
2356 Scale = 8;
2357 UnscaledOp = AArch64::STURXi;
2358 break;
2359 case AArch64::STRWui:
2360 Scale = 4;
2361 UnscaledOp = AArch64::STURWi;
2362 break;
2363 case AArch64::STRBui:
2364 Scale = 1;
2365 UnscaledOp = AArch64::STURBi;
2366 break;
2367 case AArch64::STRHui:
2368 Scale = 2;
2369 UnscaledOp = AArch64::STURHi;
2370 break;
2371 case AArch64::STRSui:
2372 Scale = 4;
2373 UnscaledOp = AArch64::STURSi;
2374 break;
2375 case AArch64::STRDui:
2376 Scale = 8;
2377 UnscaledOp = AArch64::STURDi;
2378 break;
2379 case AArch64::STRQui:
2380 Scale = 16;
2381 UnscaledOp = AArch64::STURQi;
2382 break;
2383 case AArch64::STRBBui:
2384 Scale = 1;
2385 UnscaledOp = AArch64::STURBBi;
2386 break;
2387 case AArch64::STRHHui:
2388 Scale = 2;
2389 UnscaledOp = AArch64::STURHHi;
2390 break;
2391
2392 case AArch64::LDPXi:
2393 case AArch64::LDPDi:
2394 case AArch64::STPXi:
2395 case AArch64::STPDi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002396 case AArch64::LDNPXi:
2397 case AArch64::LDNPDi:
2398 case AArch64::STNPXi:
2399 case AArch64::STNPDi:
2400 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002401 IsSigned = true;
2402 Scale = 8;
2403 break;
2404 case AArch64::LDPQi:
2405 case AArch64::STPQi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002406 case AArch64::LDNPQi:
2407 case AArch64::STNPQi:
2408 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002409 IsSigned = true;
2410 Scale = 16;
2411 break;
2412 case AArch64::LDPWi:
2413 case AArch64::LDPSi:
2414 case AArch64::STPWi:
2415 case AArch64::STPSi:
Ahmed Bougacha05541452015-09-10 01:54:43 +00002416 case AArch64::LDNPWi:
2417 case AArch64::LDNPSi:
2418 case AArch64::STNPWi:
2419 case AArch64::STNPSi:
2420 ImmIdx = 3;
Tim Northover3b0846e2014-05-24 12:50:23 +00002421 IsSigned = true;
2422 Scale = 4;
2423 break;
2424
2425 case AArch64::LDURXi:
2426 case AArch64::LDURWi:
2427 case AArch64::LDURBi:
2428 case AArch64::LDURHi:
2429 case AArch64::LDURSi:
2430 case AArch64::LDURDi:
2431 case AArch64::LDURQi:
2432 case AArch64::LDURHHi:
2433 case AArch64::LDURBBi:
2434 case AArch64::LDURSBXi:
2435 case AArch64::LDURSBWi:
2436 case AArch64::LDURSHXi:
2437 case AArch64::LDURSHWi:
2438 case AArch64::LDURSWi:
2439 case AArch64::STURXi:
2440 case AArch64::STURWi:
2441 case AArch64::STURBi:
2442 case AArch64::STURHi:
2443 case AArch64::STURSi:
2444 case AArch64::STURDi:
2445 case AArch64::STURQi:
2446 case AArch64::STURBBi:
2447 case AArch64::STURHHi:
2448 Scale = 1;
2449 break;
2450 }
2451
2452 Offset += MI.getOperand(ImmIdx).getImm() * Scale;
2453
2454 bool useUnscaledOp = false;
2455 // If the offset doesn't match the scale, we rewrite the instruction to
2456 // use the unscaled instruction instead. Likewise, if we have a negative
2457 // offset (and have an unscaled op to use).
2458 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
2459 useUnscaledOp = true;
2460
2461 // Use an unscaled addressing mode if the instruction has a negative offset
2462 // (or if the instruction is already using an unscaled addressing mode).
2463 unsigned MaskBits;
2464 if (IsSigned) {
2465 // ldp/stp instructions.
2466 MaskBits = 7;
2467 Offset /= Scale;
2468 } else if (UnscaledOp == 0 || useUnscaledOp) {
2469 MaskBits = 9;
2470 IsSigned = true;
2471 Scale = 1;
2472 } else {
2473 MaskBits = 12;
2474 IsSigned = false;
2475 Offset /= Scale;
2476 }
2477
2478 // Attempt to fold address computation.
2479 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
2480 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
2481 if (Offset >= MinOff && Offset <= MaxOff) {
2482 if (EmittableOffset)
2483 *EmittableOffset = Offset;
2484 Offset = 0;
2485 } else {
2486 int NewOff = Offset < 0 ? MinOff : MaxOff;
2487 if (EmittableOffset)
2488 *EmittableOffset = NewOff;
2489 Offset = (Offset - NewOff) * Scale;
2490 }
2491 if (OutUseUnscaledOp)
2492 *OutUseUnscaledOp = useUnscaledOp;
2493 if (OutUnscaledOp)
2494 *OutUnscaledOp = UnscaledOp;
2495 return AArch64FrameOffsetCanUpdate |
2496 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
2497}
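// Worked examples (illustrative): for an LDRXui whose total byte offset is 32
// the scale is 8, so *EmittableOffset becomes 4 and the offset is fully
// legal; for a total byte offset of 5 the scaled form cannot encode it, so
// the routine sets useUnscaledOp, reports LDURXi, and *EmittableOffset = 5.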
2498
2499bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2500 unsigned FrameReg, int &Offset,
2501 const AArch64InstrInfo *TII) {
2502 unsigned Opcode = MI.getOpcode();
2503 unsigned ImmIdx = FrameRegIdx + 1;
2504
2505 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
2506 Offset += MI.getOperand(ImmIdx).getImm();
2507 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
2508 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
2509 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
2510 MI.eraseFromParent();
2511 Offset = 0;
2512 return true;
2513 }
2514
2515 int NewOffset;
2516 unsigned UnscaledOp;
2517 bool UseUnscaledOp;
2518 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
2519 &UnscaledOp, &NewOffset);
2520 if (Status & AArch64FrameOffsetCanUpdate) {
2521 if (Status & AArch64FrameOffsetIsLegal)
2522 // Replace the FrameIndex with FrameReg.
2523 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2524 if (UseUnscaledOp)
2525 MI.setDesc(TII->get(UnscaledOp));
2526
2527 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
2528 return Offset == 0;
2529 }
2530
2531 return false;
2532}
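// Usage sketch (illustrative; variable names are made up): frame-index
// elimination typically does something like
//   if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
//     return; // frame index and offset folded into the instruction
//   // otherwise materialize FrameReg + Offset into a scratch register
// since a true return value means no residual offset is left in Offset.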
2533
2534void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
2535 NopInst.setOpcode(AArch64::HINT);
Jim Grosbache9119e42015-05-13 18:37:00 +00002536 NopInst.addOperand(MCOperand::createImm(0));
Tim Northover3b0846e2014-05-24 12:50:23 +00002537}
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002538/// useMachineCombiner - return true when a target supports MachineCombiner
Benjamin Kramer8c90fd72014-09-03 11:41:21 +00002539bool AArch64InstrInfo::useMachineCombiner() const {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002540 // AArch64 supports the combiner
2541 return true;
2542}
2543//
2544// True when Opc sets the condition flags.
2545static bool isCombineInstrSettingFlag(unsigned Opc) {
2546 switch (Opc) {
2547 case AArch64::ADDSWrr:
2548 case AArch64::ADDSWri:
2549 case AArch64::ADDSXrr:
2550 case AArch64::ADDSXri:
2551 case AArch64::SUBSWrr:
2552 case AArch64::SUBSXrr:
2553 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2554 case AArch64::SUBSWri:
2555 case AArch64::SUBSXri:
2556 return true;
2557 default:
2558 break;
2559 }
2560 return false;
2561}
2562//
2563// 32b Opcodes that can be combined with a MUL
2564static bool isCombineInstrCandidate32(unsigned Opc) {
2565 switch (Opc) {
2566 case AArch64::ADDWrr:
2567 case AArch64::ADDWri:
2568 case AArch64::SUBWrr:
2569 case AArch64::ADDSWrr:
2570 case AArch64::ADDSWri:
2571 case AArch64::SUBSWrr:
2572 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2573 case AArch64::SUBWri:
2574 case AArch64::SUBSWri:
2575 return true;
2576 default:
2577 break;
2578 }
2579 return false;
2580}
2581//
2582// 64b Opcodes that can be combined with a MUL
2583static bool isCombineInstrCandidate64(unsigned Opc) {
2584 switch (Opc) {
2585 case AArch64::ADDXrr:
2586 case AArch64::ADDXri:
2587 case AArch64::SUBXrr:
2588 case AArch64::ADDSXrr:
2589 case AArch64::ADDSXri:
2590 case AArch64::SUBSXrr:
2591 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2592 case AArch64::SUBXri:
2593 case AArch64::SUBSXri:
2594 return true;
2595 default:
2596 break;
2597 }
2598 return false;
2599}
2600//
2601// Opcodes that can be combined with a MUL
2602static bool isCombineInstrCandidate(unsigned Opc) {
2603 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
2604}
2605
2606static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
2607 unsigned MulOpc, unsigned ZeroReg) {
2608 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2609 MachineInstr *MI = nullptr;
2610 // We need a virtual register definition.
2611 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
2612 MI = MRI.getUniqueVRegDef(MO.getReg());
2613 // And it needs to be in the trace (otherwise, it won't have a depth).
2614 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
2615 return false;
2616
2617 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
2618 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
2619         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
2620
2621 // The third input reg must be zero.
2622 if (MI->getOperand(3).getReg() != ZeroReg)
2623 return false;
2624
2625  // Must only be used by the user we combine with.
Gerolf Hoflehnerfe2c11f2014-08-13 22:07:36 +00002626 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002627 return false;
2628
2629 return true;
2630}
2631
Haicheng Wu08b94622016-01-07 04:01:02 +00002632// TODO: There are many more machine instruction opcodes to match:
2633// 1. Other data types (integer, vectors)
2634// 2. Other math / logic operations (xor, or)
2635// 3. Other forms of the same operation (intrinsics and other variants)
2636bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
2637 switch (Inst.getOpcode()) {
2638 case AArch64::FADDDrr:
2639 case AArch64::FADDSrr:
2640 case AArch64::FADDv2f32:
2641 case AArch64::FADDv2f64:
2642 case AArch64::FADDv4f32:
2643 case AArch64::FMULDrr:
2644 case AArch64::FMULSrr:
2645 case AArch64::FMULX32:
2646 case AArch64::FMULX64:
2647 case AArch64::FMULXv2f32:
2648 case AArch64::FMULXv2f64:
2649 case AArch64::FMULXv4f32:
2650 case AArch64::FMULv2f32:
2651 case AArch64::FMULv2f64:
2652 case AArch64::FMULv4f32:
2653 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
2654 default:
2655 return false;
2656 }
2657}
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002658
Haicheng Wu08b94622016-01-07 04:01:02 +00002659/// Find instructions that can be turned into madd.
2660static bool getMaddPatterns(MachineInstr &Root,
2661 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002662 unsigned Opc = Root.getOpcode();
2663 MachineBasicBlock &MBB = *Root.getParent();
2664 bool Found = false;
2665
2666 if (!isCombineInstrCandidate(Opc))
2667    return false;
2668 if (isCombineInstrSettingFlag(Opc)) {
2669 int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
2670    // When NZCV is live, bail out.
2671 if (Cmp_NZCV == -1)
2672      return false;
2673 unsigned NewOpc = convertFlagSettingOpcode(&Root);
2674    // When the opcode can't change, bail out.
2675 // CHECKME: do we miss any cases for opcode conversion?
2676 if (NewOpc == Opc)
2677      return false;
2678 Opc = NewOpc;
2679 }
2680
2681 switch (Opc) {
2682 default:
2683 break;
2684 case AArch64::ADDWrr:
2685 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
2686 "ADDWrr does not have register operands");
2687 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2688 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002689 Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002690 Found = true;
2691 }
2692 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
2693 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002694 Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002695 Found = true;
2696 }
2697 break;
2698 case AArch64::ADDXrr:
2699 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2700 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002701 Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002702 Found = true;
2703 }
2704 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
2705 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002706 Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002707 Found = true;
2708 }
2709 break;
2710 case AArch64::SUBWrr:
2711 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2712 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002713 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002714 Found = true;
2715 }
2716 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
2717 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002718 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002719 Found = true;
2720 }
2721 break;
2722 case AArch64::SUBXrr:
2723 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2724 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002725 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002726 Found = true;
2727 }
2728 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
2729 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002730 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002731 Found = true;
2732 }
2733 break;
2734 case AArch64::ADDWri:
2735 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2736 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002737 Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002738 Found = true;
2739 }
2740 break;
2741 case AArch64::ADDXri:
2742 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2743 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002744 Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002745 Found = true;
2746 }
2747 break;
2748 case AArch64::SUBWri:
2749 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2750 AArch64::WZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002751 Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002752 Found = true;
2753 }
2754 break;
2755 case AArch64::SUBXri:
2756 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2757 AArch64::XZR)) {
Sanjay Patel387e66e2015-11-05 19:34:57 +00002758 Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002759 Found = true;
2760 }
2761 break;
2762 }
2763 return Found;
2764}
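// Worked example (illustrative): given
//   %1 = MADDWrrr %a, %b, %wzr   ; i.e. %1 = %a * %b
//   %2 = ADDWrr %1, %c
// the ADDWrr is the Root, operand 1 is defined by a W-form multiply with a
// zero addend and a single use, so MULADDW_OP1 is recorded for the combiner
// to try rewriting the pair into a single MADDWrrr.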
2765
Haicheng Wu08b94622016-01-07 04:01:02 +00002766/// Return true when there is potentially a faster code sequence for an
2767/// instruction chain ending in \p Root. All potential patterns are listed in
2768/// the \p Patterns vector. Patterns should be sorted in priority order since the
2769/// pattern evaluator stops checking as soon as it finds a faster sequence.
2770
2771bool AArch64InstrInfo::getMachineCombinerPatterns(
2772 MachineInstr &Root,
2773 SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
2774 if (getMaddPatterns(Root, Patterns))
2775 return true;
2776
2777 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
2778}
2779
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002780/// genMadd - Generate madd instruction and combine mul and add.
2781/// Example:
2782/// MUL I=A,B,0
2783/// ADD R,I,C
2784/// ==> MADD R,A,B,C
2785/// \param Root is the ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00002786/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002787/// contain the generated madd instruction
2788/// \param IdxMulOpd is index of operand in Root that is the result of
2789/// the MUL. In the example above IdxMulOpd is 1.
2790/// \param MaddOpc the opcode fo the madd instruction
2791static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
2792 const TargetInstrInfo *TII, MachineInstr &Root,
2793 SmallVectorImpl<MachineInstr *> &InsInstrs,
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002794 unsigned IdxMulOpd, unsigned MaddOpc,
2795 const TargetRegisterClass *RC) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002796 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
2797
2798 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
2799 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002800 unsigned ResultReg = Root.getOperand(0).getReg();
2801 unsigned SrcReg0 = MUL->getOperand(1).getReg();
2802 bool Src0IsKill = MUL->getOperand(1).isKill();
2803 unsigned SrcReg1 = MUL->getOperand(2).getReg();
2804 bool Src1IsKill = MUL->getOperand(2).isKill();
2805 unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
2806 bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
2807
2808 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
2809 MRI.constrainRegClass(ResultReg, RC);
2810 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
2811 MRI.constrainRegClass(SrcReg0, RC);
2812 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
2813 MRI.constrainRegClass(SrcReg1, RC);
2814 if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
2815 MRI.constrainRegClass(SrcReg2, RC);
2816
2817 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
2818 ResultReg)
2819 .addReg(SrcReg0, getKillRegState(Src0IsKill))
2820 .addReg(SrcReg1, getKillRegState(Src1IsKill))
2821 .addReg(SrcReg2, getKillRegState(Src2IsKill));
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002822 // Insert the MADD
2823 InsInstrs.push_back(MIB);
2824 return MUL;
2825}
2826
2827/// genMaddR - Generate madd instruction and combine mul and add using
2828/// an extra virtual register
2829/// Example - an ADD intermediate needs to be stored in a register:
2830/// MUL I=A,B,0
2831/// ADD R,I,Imm
2832/// ==> ORR V, ZR, Imm
2833/// ==> MADD R,A,B,V
2834/// \param Root is the ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00002835/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002836/// contain the generated madd instruction
2837/// \param IdxMulOpd is index of operand in Root that is the result of
2838/// the MUL. In the example above IdxMulOpd is 1.
2839/// \param MaddOpc the opcode fo the madd instruction
2840/// \param MaddOpc the opcode of the madd instruction
2841/// (V in the example above).
2842static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
2843 const TargetInstrInfo *TII, MachineInstr &Root,
2844 SmallVectorImpl<MachineInstr *> &InsInstrs,
2845 unsigned IdxMulOpd, unsigned MaddOpc,
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002846 unsigned VR, const TargetRegisterClass *RC) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002847 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
2848
2849 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002850 unsigned ResultReg = Root.getOperand(0).getReg();
2851 unsigned SrcReg0 = MUL->getOperand(1).getReg();
2852 bool Src0IsKill = MUL->getOperand(1).isKill();
2853 unsigned SrcReg1 = MUL->getOperand(2).getReg();
2854 bool Src1IsKill = MUL->getOperand(2).isKill();
2855
2856 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
2857 MRI.constrainRegClass(ResultReg, RC);
2858 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
2859 MRI.constrainRegClass(SrcReg0, RC);
2860 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
2861 MRI.constrainRegClass(SrcReg1, RC);
2862 if (TargetRegisterInfo::isVirtualRegister(VR))
2863 MRI.constrainRegClass(VR, RC);
2864
2865 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
2866 ResultReg)
2867 .addReg(SrcReg0, getKillRegState(Src0IsKill))
2868 .addReg(SrcReg1, getKillRegState(Src1IsKill))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002869 .addReg(VR);
2870 // Insert the MADD
2871 InsInstrs.push_back(MIB);
2872 return MUL;
2873}
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002874
Sanjay Patelcfe03932015-06-19 23:21:42 +00002875/// When getMachineCombinerPatterns() finds potential patterns,
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002876/// this function generates the instructions that could replace the
2877/// original code sequence.
2878void AArch64InstrInfo::genAlternativeCodeSequence(
Sanjay Patel387e66e2015-11-05 19:34:57 +00002879 MachineInstr &Root, MachineCombinerPattern Pattern,
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002880 SmallVectorImpl<MachineInstr *> &InsInstrs,
2881 SmallVectorImpl<MachineInstr *> &DelInstrs,
2882 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
2883 MachineBasicBlock &MBB = *Root.getParent();
2884 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2885 MachineFunction &MF = *MBB.getParent();
Eric Christophere0818912014-09-03 20:36:26 +00002886 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002887
2888 MachineInstr *MUL;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002889 const TargetRegisterClass *RC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002890 unsigned Opc;
2891 switch (Pattern) {
2892 default:
Haicheng Wu08b94622016-01-07 04:01:02 +00002893 // Reassociate instructions.
2894 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
2895 DelInstrs, InstrIdxForVirtReg);
2896 return;
Sanjay Patel387e66e2015-11-05 19:34:57 +00002897 case MachineCombinerPattern::MULADDW_OP1:
2898 case MachineCombinerPattern::MULADDX_OP1:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002899 // MUL I=A,B,0
2900 // ADD R,I,C
2901 // ==> MADD R,A,B,C
2902 // --- Create(MADD);
Sanjay Patel387e66e2015-11-05 19:34:57 +00002903 if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002904 Opc = AArch64::MADDWrrr;
2905 RC = &AArch64::GPR32RegClass;
2906 } else {
2907 Opc = AArch64::MADDXrrr;
2908 RC = &AArch64::GPR64RegClass;
2909 }
2910 MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002911 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00002912 case MachineCombinerPattern::MULADDW_OP2:
2913 case MachineCombinerPattern::MULADDX_OP2:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002914 // MUL I=A,B,0
2915 // ADD R,C,I
2916 // ==> MADD R,A,B,C
2917 // --- Create(MADD);
Sanjay Patel387e66e2015-11-05 19:34:57 +00002918 if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002919 Opc = AArch64::MADDWrrr;
2920 RC = &AArch64::GPR32RegClass;
2921 } else {
2922 Opc = AArch64::MADDXrrr;
2923 RC = &AArch64::GPR64RegClass;
2924 }
2925 MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002926 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00002927 case MachineCombinerPattern::MULADDWI_OP1:
2928 case MachineCombinerPattern::MULADDXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002929 // MUL I=A,B,0
2930 // ADD R,I,Imm
2931 // ==> ORR V, ZR, Imm
2932 // ==> MADD R,A,B,V
2933 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002934 const TargetRegisterClass *OrrRC;
2935 unsigned BitSize, OrrOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00002936 if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002937 OrrOpc = AArch64::ORRWri;
2938 OrrRC = &AArch64::GPR32spRegClass;
2939 BitSize = 32;
2940 ZeroReg = AArch64::WZR;
2941 Opc = AArch64::MADDWrrr;
2942 RC = &AArch64::GPR32RegClass;
2943 } else {
2944 OrrOpc = AArch64::ORRXri;
2945 OrrRC = &AArch64::GPR64spRegClass;
2946 BitSize = 64;
2947 ZeroReg = AArch64::XZR;
2948 Opc = AArch64::MADDXrrr;
2949 RC = &AArch64::GPR64RegClass;
2950 }
2951 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
2952 uint64_t Imm = Root.getOperand(2).getImm();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002953
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002954 if (Root.getOperand(3).isImm()) {
2955 unsigned Val = Root.getOperand(3).getImm();
2956 Imm = Imm << Val;
2957 }
2958 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
2959 uint64_t Encoding;
2960 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
2961 MachineInstrBuilder MIB1 =
2962 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
2963 .addReg(ZeroReg)
2964 .addImm(Encoding);
2965 InsInstrs.push_back(MIB1);
2966 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
2967 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002968 }
2969 break;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002970 }
Sanjay Patel387e66e2015-11-05 19:34:57 +00002971 case MachineCombinerPattern::MULSUBW_OP1:
2972 case MachineCombinerPattern::MULSUBX_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002973 // MUL I=A,B,0
2974 // SUB R,I, C
2975 // ==> SUB V, 0, C
2976 // ==> MADD R,A,B,V // = -C + A*B
2977 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002978 const TargetRegisterClass *SubRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002979 unsigned SubOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00002980 if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002981 SubOpc = AArch64::SUBWrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002982 SubRC = &AArch64::GPR32spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002983 ZeroReg = AArch64::WZR;
2984 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002985 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002986 } else {
2987 SubOpc = AArch64::SUBXrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002988 SubRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002989 ZeroReg = AArch64::XZR;
2990 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002991 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002992 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002993 unsigned NewVR = MRI.createVirtualRegister(SubRC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002994 // SUB NewVR, 0, C
2995 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002996 BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002997 .addReg(ZeroReg)
2998 .addOperand(Root.getOperand(2));
2999 InsInstrs.push_back(MIB1);
3000 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003001 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3002 break;
3003 }
Sanjay Patel387e66e2015-11-05 19:34:57 +00003004 case MachineCombinerPattern::MULSUBW_OP2:
3005 case MachineCombinerPattern::MULSUBX_OP2:
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003006 // MUL I=A,B,0
3007 // SUB R,C,I
3008 // ==> MSUB R,A,B,C (computes C - A*B)
3009 // --- Create(MSUB);
Sanjay Patel387e66e2015-11-05 19:34:57 +00003010 if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003011 Opc = AArch64::MSUBWrrr;
3012 RC = &AArch64::GPR32RegClass;
3013 } else {
3014 Opc = AArch64::MSUBXrrr;
3015 RC = &AArch64::GPR64RegClass;
3016 }
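    // MSUB already computes C - A*B, so no extra instruction is needed.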
3017 MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003018 break;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003019 case MachineCombinerPattern::MULSUBWI_OP1:
3020 case MachineCombinerPattern::MULSUBXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003021 // MUL I=A,B,0
3022 // SUB R,I, Imm
3023 // ==> ORR V, ZR, -Imm
3024 // ==> MADD R,A,B,V // = -Imm + A*B
3025 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003026 const TargetRegisterClass *OrrRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003027 unsigned BitSize, OrrOpc, ZeroReg;
Sanjay Patel387e66e2015-11-05 19:34:57 +00003028 if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
Juergen Ributzka25816b02014-08-30 06:16:26 +00003029 OrrOpc = AArch64::ORRWri;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003030 OrrRC = &AArch64::GPR32spRegClass;
3031 BitSize = 32;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003032 ZeroReg = AArch64::WZR;
3033 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003034 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003035 } else {
3036 OrrOpc = AArch64::ORRXri;
Juergen Ributzkaf9660f02014-11-04 22:20:07 +00003037 OrrRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003038 BitSize = 64;
3039 ZeroReg = AArch64::XZR;
3040 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003041 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003042 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003043 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003044 int Imm = Root.getOperand(2).getImm();
3045 if (Root.getOperand(3).isImm()) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003046 unsigned Val = Root.getOperand(3).getImm();
3047 Imm = Imm << Val;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003048 }
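    // Negate the immediate and keep only its low BitSize bits so that the
    // subtraction can be folded into the MADD accumulator.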
 3050 uint64_t UImm = static_cast<uint64_t>(-Imm) << (64 - BitSize) >> (64 - BitSize);
3050 uint64_t Encoding;
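    // As above, bail out silently when -Imm is not a valid logical immediate.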
3051 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3052 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003053 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003054 .addReg(ZeroReg)
3055 .addImm(Encoding);
3056 InsInstrs.push_back(MIB1);
3057 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003058 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003059 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003060 break;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003061 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00003062 } // end switch (Pattern)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00003063 // Record MUL and ADD/SUB for deletion
3064 DelInstrs.push_back(MUL);
3065 DelInstrs.push_back(&Root);
3066
3067 return;
3068}
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003069
3070/// \brief Replace csincr-branch sequence by simple conditional branch
3071///
3072/// Examples:
3073/// 1.
3074/// csinc w9, wzr, wzr, <condition code>
3075/// tbnz w9, #0, 0x44
3076/// to
3077/// b.<inverted condition code>
3078///
3079/// 2.
3080/// csinc w9, wzr, wzr, <condition code>
3081/// tbz w9, #0, 0x44
3082/// to
3083/// b.<condition code>
3084///
Chad Rosier4aeab5f2016-03-21 13:43:58 +00003085/// Replace a compare-and-branch sequence by a TBZ/TBNZ instruction when the
 3086/// compared value is produced by an AND with a power-of-2 constant.
Balaram Makame9b27252016-03-10 17:54:55 +00003087///
3088/// Examples:
3089/// and w8, w8, #0x400
3090/// cbnz w8, L1
3091/// to
3092/// tbnz w8, #10, L1
3093///
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003094/// \param MI Conditional Branch
3095/// \return True when the simple conditional branch is generated
3096///
3097bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
3098 bool IsNegativeBranch = false;
3099 bool IsTestAndBranch = false;
3100 unsigned TargetBBInMI = 0;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003101 switch (MI->getOpcode()) {
3102 default:
3103 llvm_unreachable("Unknown branch instruction?");
3104 case AArch64::Bcc:
3105 return false;
3106 case AArch64::CBZW:
3107 case AArch64::CBZX:
3108 TargetBBInMI = 1;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003109 break;
3110 case AArch64::CBNZW:
3111 case AArch64::CBNZX:
3112 TargetBBInMI = 1;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003113 IsNegativeBranch = true;
3114 break;
3115 case AArch64::TBZW:
3116 case AArch64::TBZX:
3117 TargetBBInMI = 2;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003118 IsTestAndBranch = true;
3119 break;
3120 case AArch64::TBNZW:
3121 case AArch64::TBNZX:
3122 TargetBBInMI = 2;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003123 IsNegativeBranch = true;
3124 IsTestAndBranch = true;
3125 break;
3126 }
3127 // So we increment a zero register and test for bits other
3128 // than bit 0? Conservatively bail out in case the verifier
3129 // missed this case.
3130 if (IsTestAndBranch && MI->getOperand(1).getImm())
3131 return false;
3132
3133 // Find Definition.
 3134 assert(MI->getParent() && "Incomplete machine instruction\n");
3135 MachineBasicBlock *MBB = MI->getParent();
3136 MachineFunction *MF = MBB->getParent();
3137 MachineRegisterInfo *MRI = &MF->getRegInfo();
3138 unsigned VReg = MI->getOperand(0).getReg();
3139 if (!TargetRegisterInfo::isVirtualRegister(VReg))
3140 return false;
3141
3142 MachineInstr *DefMI = MRI->getVRegDef(VReg);
3143
Balaram Makame9b27252016-03-10 17:54:55 +00003144 // Look through COPY instructions to find definition.
3145 while (DefMI->isCopy()) {
3146 unsigned CopyVReg = DefMI->getOperand(1).getReg();
3147 if (!MRI->hasOneNonDBGUse(CopyVReg))
3148 return false;
3149 if (!MRI->hasOneDef(CopyVReg))
3150 return false;
3151 DefMI = MRI->getVRegDef(CopyVReg);
3152 }
3153
3154 switch (DefMI->getOpcode()) {
3155 default:
3156 return false;
 3157 // Fold an AND into a TBZ/TBNZ if its constant operand is a power of 2.
3158 case AArch64::ANDWri:
3159 case AArch64::ANDXri: {
3160 if (IsTestAndBranch)
3161 return false;
3162 if (DefMI->getParent() != MBB)
3163 return false;
3164 if (!MRI->hasOneNonDBGUse(VReg))
3165 return false;
3166
3167 uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
3168 DefMI->getOperand(2).getImm(),
3169 (DefMI->getOpcode() == AArch64::ANDWri) ? 32 : 64);
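    // Only a single-bit mask can be turned into a bit-test branch.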
3170 if (!isPowerOf2_64(Mask))
3171 return false;
3172
3173 MachineOperand &MO = DefMI->getOperand(1);
3174 unsigned NewReg = MO.getReg();
3175 if (!TargetRegisterInfo::isVirtualRegister(NewReg))
3176 return false;
3177
3178 assert(!MRI->def_empty(NewReg) && "Register must be defined.");
3179
3180 MachineBasicBlock &RefToMBB = *MBB;
3181 MachineBasicBlock *TBB = MI->getOperand(1).getMBB();
3182 DebugLoc DL = MI->getDebugLoc();
3183 unsigned Imm = Log2_64(Mask);
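    // Bit indices 0-31 can use the W form of TBZ/TBNZ; 32-63 need the X form.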
3184 unsigned Opc = (Imm < 32)
3185 ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
3186 : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
3187 BuildMI(RefToMBB, MI, DL, get(Opc)).addReg(NewReg).addImm(Imm).addMBB(TBB);
3188 MI->eraseFromParent();
3189 return true;
3190 }
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003191 // Look for CSINC
Balaram Makame9b27252016-03-10 17:54:55 +00003192 case AArch64::CSINCWr:
3193 case AArch64::CSINCXr: {
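    // The rewrite applies only when the CSINC reads the zero register twice,
    // i.e. it materializes 0 or 1 depending solely on the condition code.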
3194 if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
3195 DefMI->getOperand(2).getReg() == AArch64::WZR) &&
3196 !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
3197 DefMI->getOperand(2).getReg() == AArch64::XZR))
3198 return false;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003199
Balaram Makame9b27252016-03-10 17:54:55 +00003200 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
3201 return false;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003202
Balaram Makame9b27252016-03-10 17:54:55 +00003203 AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
3204 bool CheckOnlyCCWrites = true;
3205 // Convert only when the condition code is not modified between
3206 // the CSINC and the branch. The CC may be used by other
3207 // instructions in between.
3208 if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
3209 return false;
3210 MachineBasicBlock &RefToMBB = *MBB;
3211 MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
3212 DebugLoc DL = MI->getDebugLoc();
3213 if (IsNegativeBranch)
3214 CC = AArch64CC::getInvertedCondCode(CC);
3215 BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
3216 MI->eraseFromParent();
3217 return true;
3218 }
3219 }
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00003220}
Alex Lorenzf3630112015-08-18 22:52:15 +00003221
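// MO_FRAGMENT holds the mutually exclusive "direct" operand flags (page,
// pageoff, g0-g3, hi12); the remaining bits are independent bitmask flags.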
3222std::pair<unsigned, unsigned>
3223AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
3224 const unsigned Mask = AArch64II::MO_FRAGMENT;
3225 return std::make_pair(TF & Mask, TF & ~Mask);
3226}
3227
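// Textual names for the direct target flags, used when printing and parsing
// MIR.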
3228ArrayRef<std::pair<unsigned, const char *>>
3229AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
3230 using namespace AArch64II;
Hal Finkel982e8d42015-08-30 08:07:29 +00003231 static const std::pair<unsigned, const char *> TargetFlags[] = {
Alex Lorenzf3630112015-08-18 22:52:15 +00003232 {MO_PAGE, "aarch64-page"},
3233 {MO_PAGEOFF, "aarch64-pageoff"},
3234 {MO_G3, "aarch64-g3"},
3235 {MO_G2, "aarch64-g2"},
3236 {MO_G1, "aarch64-g1"},
3237 {MO_G0, "aarch64-g0"},
3238 {MO_HI12, "aarch64-hi12"}};
3239 return makeArrayRef(TargetFlags);
3240}
3241
3242ArrayRef<std::pair<unsigned, const char *>>
3243AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
3244 using namespace AArch64II;
Hal Finkel982e8d42015-08-30 08:07:29 +00003245 static const std::pair<unsigned, const char *> TargetFlags[] = {
Alex Lorenzf3630112015-08-18 22:52:15 +00003246 {MO_GOT, "aarch64-got"},
3247 {MO_NC, "aarch64-nc"},
3248 {MO_TLS, "aarch64-tls"},
3249 {MO_CONSTPOOL, "aarch64-constant-pool"}};
3250 return makeArrayRef(TargetFlags);
3251}