1//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the AArch64 implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64InstrInfo.h"
15#include "AArch64MachineCombinerPattern.h"
16#include "AArch64Subtarget.h"
17#include "MCTargetDesc/AArch64AddressingModes.h"
18#include "llvm/CodeGen/MachineFrameInfo.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/MachineMemOperand.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22#include "llvm/CodeGen/PseudoSourceValue.h"
23#include "llvm/MC/MCInst.h"
24#include "llvm/Support/ErrorHandling.h"
25#include "llvm/Support/TargetRegistry.h"
26
27using namespace llvm;
28
29#define GET_INSTRINFO_CTOR_DTOR
30#include "AArch64GenInstrInfo.inc"
31
32AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
33 : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
34 RI(STI.getTargetTriple()), Subtarget(STI) {}
35
36/// GetInstSize - Return the number of bytes of code the specified
37/// instruction may be. This returns the maximum number of bytes.
38unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
39 const MachineBasicBlock &MBB = *MI->getParent();
40 const MachineFunction *MF = MBB.getParent();
41 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
42
43 if (MI->getOpcode() == AArch64::INLINEASM)
44 return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
45
46 const MCInstrDesc &Desc = MI->getDesc();
47 switch (Desc.getOpcode()) {
48 default:
49 // Anything not explicitly designated otherwise is a normal 4-byte insn.
50 return 4;
51 case TargetOpcode::DBG_VALUE:
52 case TargetOpcode::EH_LABEL:
53 case TargetOpcode::IMPLICIT_DEF:
54 case TargetOpcode::KILL:
55 return 0;
56 }
57
58 llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
59}
60
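// Helper for AnalyzeBranch below. The Cond vector it fills is encoded as:
//   Bcc:        { condition code }
//   CBZ/CBNZ:   { -1, opcode, register }
//   TBZ/TBNZ:   { -1, opcode, register, bit index }
// e.g. "tbnz w3, #5, target" is recorded as { -1, TBNZW, w3, 5 }.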
61static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
62 SmallVectorImpl<MachineOperand> &Cond) {
63 // Block ends with fall-through condbranch.
64 switch (LastInst->getOpcode()) {
65 default:
66 llvm_unreachable("Unknown branch instruction?");
67 case AArch64::Bcc:
68 Target = LastInst->getOperand(1).getMBB();
69 Cond.push_back(LastInst->getOperand(0));
70 break;
71 case AArch64::CBZW:
72 case AArch64::CBZX:
73 case AArch64::CBNZW:
74 case AArch64::CBNZX:
75 Target = LastInst->getOperand(1).getMBB();
76 Cond.push_back(MachineOperand::CreateImm(-1));
77 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
78 Cond.push_back(LastInst->getOperand(0));
79 break;
80 case AArch64::TBZW:
81 case AArch64::TBZX:
82 case AArch64::TBNZW:
83 case AArch64::TBNZX:
84 Target = LastInst->getOperand(2).getMBB();
85 Cond.push_back(MachineOperand::CreateImm(-1));
86 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
87 Cond.push_back(LastInst->getOperand(0));
88 Cond.push_back(LastInst->getOperand(1));
89 }
90}
91
92// Branch analysis.
93bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
94 MachineBasicBlock *&TBB,
95 MachineBasicBlock *&FBB,
96 SmallVectorImpl<MachineOperand> &Cond,
97 bool AllowModify) const {
98 // If the block has no terminators, it just falls into the block after it.
99 MachineBasicBlock::iterator I = MBB.end();
100 if (I == MBB.begin())
101 return false;
102 --I;
103 while (I->isDebugValue()) {
104 if (I == MBB.begin())
105 return false;
106 --I;
107 }
108 if (!isUnpredicatedTerminator(I))
109 return false;
110
111 // Get the last instruction in the block.
112 MachineInstr *LastInst = I;
113
114 // If there is only one terminator instruction, process it.
115 unsigned LastOpc = LastInst->getOpcode();
116 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
117 if (isUncondBranchOpcode(LastOpc)) {
118 TBB = LastInst->getOperand(0).getMBB();
119 return false;
120 }
121 if (isCondBranchOpcode(LastOpc)) {
122 // Block ends with fall-through condbranch.
123 parseCondBranch(LastInst, TBB, Cond);
124 return false;
125 }
126 return true; // Can't handle indirect branch.
127 }
128
129 // Get the instruction before it if it is a terminator.
130 MachineInstr *SecondLastInst = I;
131 unsigned SecondLastOpc = SecondLastInst->getOpcode();
132
133 // If AllowModify is true and the block ends with two or more unconditional
134 // branches, delete all but the first unconditional branch.
135 if (AllowModify && isUncondBranchOpcode(LastOpc)) {
136 while (isUncondBranchOpcode(SecondLastOpc)) {
137 LastInst->eraseFromParent();
138 LastInst = SecondLastInst;
139 LastOpc = LastInst->getOpcode();
140 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
141 // Return now; the only remaining terminator is an unconditional branch.
142 TBB = LastInst->getOperand(0).getMBB();
143 return false;
144 } else {
145 SecondLastInst = I;
146 SecondLastOpc = SecondLastInst->getOpcode();
147 }
148 }
149 }
150
151 // If there are three terminators, we don't know what sort of block this is.
152 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
153 return true;
154
155 // If the block ends with a B and a Bcc, handle it.
156 if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
157 parseCondBranch(SecondLastInst, TBB, Cond);
158 FBB = LastInst->getOperand(0).getMBB();
159 return false;
160 }
161
162 // If the block ends with two unconditional branches, handle it. The second
163 // one is not executed, so remove it.
164 if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
165 TBB = SecondLastInst->getOperand(0).getMBB();
166 I = LastInst;
167 if (AllowModify)
168 I->eraseFromParent();
169 return false;
170 }
171
172 // ...likewise if it ends with an indirect branch followed by an unconditional
173 // branch.
174 if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
175 I = LastInst;
176 if (AllowModify)
177 I->eraseFromParent();
178 return true;
179 }
180
181 // Otherwise, can't handle this.
182 return true;
183}
184
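// Invert the condition recorded by parseCondBranch: a plain Bcc has its
// condition code inverted (e.g. EQ -> NE), while a folded compare-and-branch
// swaps to the complementary opcode (e.g. CBZW -> CBNZW, TBNZX -> TBZX).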
185bool AArch64InstrInfo::ReverseBranchCondition(
186 SmallVectorImpl<MachineOperand> &Cond) const {
187 if (Cond[0].getImm() != -1) {
188 // Regular Bcc
189 AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
190 Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
191 } else {
192 // Folded compare-and-branch
193 switch (Cond[1].getImm()) {
194 default:
195 llvm_unreachable("Unknown conditional branch!");
196 case AArch64::CBZW:
197 Cond[1].setImm(AArch64::CBNZW);
198 break;
199 case AArch64::CBNZW:
200 Cond[1].setImm(AArch64::CBZW);
201 break;
202 case AArch64::CBZX:
203 Cond[1].setImm(AArch64::CBNZX);
204 break;
205 case AArch64::CBNZX:
206 Cond[1].setImm(AArch64::CBZX);
207 break;
208 case AArch64::TBZW:
209 Cond[1].setImm(AArch64::TBNZW);
210 break;
211 case AArch64::TBNZW:
212 Cond[1].setImm(AArch64::TBZW);
213 break;
214 case AArch64::TBZX:
215 Cond[1].setImm(AArch64::TBNZX);
216 break;
217 case AArch64::TBNZX:
218 Cond[1].setImm(AArch64::TBZX);
219 break;
220 }
221 }
222
223 return false;
224}
225
226unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
227 MachineBasicBlock::iterator I = MBB.end();
228 if (I == MBB.begin())
229 return 0;
230 --I;
231 while (I->isDebugValue()) {
232 if (I == MBB.begin())
233 return 0;
234 --I;
235 }
236 if (!isUncondBranchOpcode(I->getOpcode()) &&
237 !isCondBranchOpcode(I->getOpcode()))
238 return 0;
239
240 // Remove the branch.
241 I->eraseFromParent();
242
243 I = MBB.end();
244
245 if (I == MBB.begin())
246 return 1;
247 --I;
248 if (!isCondBranchOpcode(I->getOpcode()))
249 return 1;
250
251 // Remove the branch.
252 I->eraseFromParent();
253 return 2;
254}
255
256void AArch64InstrInfo::instantiateCondBranch(
257 MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
258 const SmallVectorImpl<MachineOperand> &Cond) const {
259 if (Cond[0].getImm() != -1) {
260 // Regular Bcc
261 BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
262 } else {
263 // Folded compare-and-branch
264 // Note that we use addOperand instead of addReg to keep the flags.
265 const MachineInstrBuilder MIB =
266 BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
267 if (Cond.size() > 3)
268 MIB.addImm(Cond[3].getImm());
269 MIB.addMBB(TBB);
270 }
271}
272
273unsigned AArch64InstrInfo::InsertBranch(
274 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
275 const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
276 // Shouldn't be a fall through.
277 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
278
279 if (!FBB) {
280 if (Cond.empty()) // Unconditional branch?
281 BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
282 else
283 instantiateCondBranch(MBB, DL, TBB, Cond);
284 return 1;
285 }
286
287 // Two-way conditional branch.
288 instantiateCondBranch(MBB, DL, TBB, Cond);
289 BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
290 return 2;
291}
292
293// Find the original register that VReg is copied from.
294static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
295 while (TargetRegisterInfo::isVirtualRegister(VReg)) {
296 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
297 if (!DefMI->isFullCopy())
298 return VReg;
299 VReg = DefMI->getOperand(1).getReg();
300 }
301 return VReg;
302}
303
304// Determine if VReg is defined by an instruction that can be folded into a
305// csel instruction. If so, return the folded opcode, and the replacement
306// register.
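// For example, "add w1, w2, #1" can be folded into the select as a csinc of
// w2, "orn w1, wzr, w2" as a csinv, and "sub w1, wzr, w2" as a csneg, so the
// separate defining instruction becomes dead.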
307static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
308 unsigned *NewVReg = nullptr) {
309 VReg = removeCopies(MRI, VReg);
310 if (!TargetRegisterInfo::isVirtualRegister(VReg))
311 return 0;
312
313 bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
314 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
315 unsigned Opc = 0;
316 unsigned SrcOpNum = 0;
317 switch (DefMI->getOpcode()) {
318 case AArch64::ADDSXri:
319 case AArch64::ADDSWri:
320 // if NZCV is used, do not fold.
321 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
322 return 0;
323 // fall-through to ADDXri and ADDWri.
324 case AArch64::ADDXri:
325 case AArch64::ADDWri:
326 // add x, 1 -> csinc.
327 if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
328 DefMI->getOperand(3).getImm() != 0)
329 return 0;
330 SrcOpNum = 1;
331 Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
332 break;
333
334 case AArch64::ORNXrr:
335 case AArch64::ORNWrr: {
336 // not x -> csinv, represented as orn dst, xzr, src.
337 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
338 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
339 return 0;
340 SrcOpNum = 2;
341 Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
342 break;
343 }
344
345 case AArch64::SUBSXrr:
346 case AArch64::SUBSWrr:
347 // if NZCV is used, do not fold.
348 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
349 return 0;
350 // fall-through to SUBXrr and SUBWrr.
351 case AArch64::SUBXrr:
352 case AArch64::SUBWrr: {
353 // neg x -> csneg, represented as sub dst, xzr, src.
354 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
355 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
356 return 0;
357 SrcOpNum = 2;
358 Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
359 break;
360 }
361 default:
362 return 0;
363 }
364 assert(Opc && SrcOpNum && "Missing parameters");
365
366 if (NewVReg)
367 *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
368 return Opc;
369}
370
371bool AArch64InstrInfo::canInsertSelect(
372 const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
373 unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
374 int &FalseCycles) const {
375 // Check register classes.
376 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
377 const TargetRegisterClass *RC =
378 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
379 if (!RC)
380 return false;
381
382 // Expanding cbz/tbz requires an extra cycle of latency on the condition.
383 unsigned ExtraCondLat = Cond.size() != 1;
384
385 // GPRs are handled by csel.
386 // FIXME: Fold in x+1, -x, and ~x when applicable.
387 if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
388 AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
389 // Single-cycle csel, csinc, csinv, and csneg.
390 CondCycles = 1 + ExtraCondLat;
391 TrueCycles = FalseCycles = 1;
392 if (canFoldIntoCSel(MRI, TrueReg))
393 TrueCycles = 0;
394 else if (canFoldIntoCSel(MRI, FalseReg))
395 FalseCycles = 0;
396 return true;
397 }
398
399 // Scalar floating point is handled by fcsel.
400 // FIXME: Form fabs, fmin, and fmax when applicable.
401 if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
402 AArch64::FPR32RegClass.hasSubClassEq(RC)) {
403 CondCycles = 5 + ExtraCondLat;
404 TrueCycles = FalseCycles = 2;
405 return true;
406 }
407
408 // Can't do vectors.
409 return false;
410}
411
412void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
413 MachineBasicBlock::iterator I, DebugLoc DL,
414 unsigned DstReg,
415 const SmallVectorImpl<MachineOperand> &Cond,
416 unsigned TrueReg, unsigned FalseReg) const {
417 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
418
419 // Parse the condition code, see parseCondBranch() above.
420 AArch64CC::CondCode CC;
421 switch (Cond.size()) {
422 default:
423 llvm_unreachable("Unknown condition opcode in Cond");
424 case 1: // b.cc
425 CC = AArch64CC::CondCode(Cond[0].getImm());
426 break;
427 case 3: { // cbz/cbnz
428 // We must insert a compare against 0.
429 bool Is64Bit;
430 switch (Cond[1].getImm()) {
431 default:
432 llvm_unreachable("Unknown branch opcode in Cond");
433 case AArch64::CBZW:
434 Is64Bit = 0;
435 CC = AArch64CC::EQ;
436 break;
437 case AArch64::CBZX:
438 Is64Bit = 1;
439 CC = AArch64CC::EQ;
440 break;
441 case AArch64::CBNZW:
442 Is64Bit = 0;
443 CC = AArch64CC::NE;
444 break;
445 case AArch64::CBNZX:
446 Is64Bit = 1;
447 CC = AArch64CC::NE;
448 break;
449 }
450 unsigned SrcReg = Cond[2].getReg();
451 if (Is64Bit) {
452 // cmp reg, #0 is actually subs xzr, reg, #0.
453 MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
454 BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
455 .addReg(SrcReg)
456 .addImm(0)
457 .addImm(0);
458 } else {
459 MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
460 BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
461 .addReg(SrcReg)
462 .addImm(0)
463 .addImm(0);
464 }
465 break;
466 }
467 case 4: { // tbz/tbnz
468 // We must insert a tst instruction.
469 switch (Cond[1].getImm()) {
470 default:
471 llvm_unreachable("Unknown branch opcode in Cond");
472 case AArch64::TBZW:
473 case AArch64::TBZX:
474 CC = AArch64CC::EQ;
475 break;
476 case AArch64::TBNZW:
477 case AArch64::TBNZX:
478 CC = AArch64CC::NE;
479 break;
480 }
481 // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
482 if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
483 BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
484 .addReg(Cond[2].getReg())
485 .addImm(
486 AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
487 else
488 BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
489 .addReg(Cond[2].getReg())
490 .addImm(
491 AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
492 break;
493 }
494 }
495
496 unsigned Opc = 0;
497 const TargetRegisterClass *RC = nullptr;
498 bool TryFold = false;
499 if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
500 RC = &AArch64::GPR64RegClass;
501 Opc = AArch64::CSELXr;
502 TryFold = true;
503 } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
504 RC = &AArch64::GPR32RegClass;
505 Opc = AArch64::CSELWr;
506 TryFold = true;
507 } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
508 RC = &AArch64::FPR64RegClass;
509 Opc = AArch64::FCSELDrrr;
510 } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
511 RC = &AArch64::FPR32RegClass;
512 Opc = AArch64::FCSELSrrr;
513 }
514 assert(RC && "Unsupported regclass");
515
516 // Try folding simple instructions into the csel.
517 if (TryFold) {
518 unsigned NewVReg = 0;
519 unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
520 if (FoldedOpc) {
521 // The folded opcodes csinc, csinv and csneg apply the operation to
522 // FalseReg, so we need to invert the condition.
523 CC = AArch64CC::getInvertedCondCode(CC);
524 TrueReg = FalseReg;
525 } else
526 FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);
527
528 // Fold the operation. Leave any dead instructions for DCE to clean up.
529 if (FoldedOpc) {
530 FalseReg = NewVReg;
531 Opc = FoldedOpc;
532 // This extends the live range of NewVReg.
533 MRI.clearKillFlags(NewVReg);
534 }
535 }
536
537 // Pull all virtual registers into the appropriate class.
538 MRI.constrainRegClass(TrueReg, RC);
539 MRI.constrainRegClass(FalseReg, RC);
540
541 // Insert the csel.
542 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
543 CC);
544}
545
546// FIXME: this implementation should be micro-architecture dependent, so a
547// micro-architecture target hook should be introduced here in future.
548bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
549 if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
550 return MI->isAsCheapAsAMove();
551
552 switch (MI->getOpcode()) {
553 default:
554 return false;
555
556 // add/sub on register without shift
557 case AArch64::ADDWri:
558 case AArch64::ADDXri:
559 case AArch64::SUBWri:
560 case AArch64::SUBXri:
561 return (MI->getOperand(3).getImm() == 0);
562
563 // logical ops on immediate
564 case AArch64::ANDWri:
565 case AArch64::ANDXri:
566 case AArch64::EORWri:
567 case AArch64::EORXri:
568 case AArch64::ORRWri:
569 case AArch64::ORRXri:
570 return true;
571
572 // logical ops on register without shift
573 case AArch64::ANDWrr:
574 case AArch64::ANDXrr:
575 case AArch64::BICWrr:
576 case AArch64::BICXrr:
577 case AArch64::EONWrr:
578 case AArch64::EONXrr:
579 case AArch64::EORWrr:
580 case AArch64::EORXrr:
581 case AArch64::ORNWrr:
582 case AArch64::ORNXrr:
583 case AArch64::ORRWrr:
584 case AArch64::ORRXrr:
585 return true;
586 }
587
588 llvm_unreachable("Unknown opcode to check as cheap as a move!");
589}
590
591bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
592 unsigned &SrcReg, unsigned &DstReg,
593 unsigned &SubIdx) const {
594 switch (MI.getOpcode()) {
595 default:
596 return false;
597 case AArch64::SBFMXri: // aka sxtw
598 case AArch64::UBFMXri: // aka uxtw
599 // Check for the 32 -> 64 bit extension case, these instructions can do
600 // much more.
601 if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
602 return false;
603 // This is a signed or unsigned 32 -> 64 bit extension.
604 SrcReg = MI.getOperand(1).getReg();
605 DstReg = MI.getOperand(0).getReg();
606 SubIdx = AArch64::sub_32;
607 return true;
608 }
609}
610
611bool
612AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
613 MachineInstr *MIb,
614 AliasAnalysis *AA) const {
615 const TargetRegisterInfo *TRI = &getRegisterInfo();
616 unsigned BaseRegA = 0, BaseRegB = 0;
617 int OffsetA = 0, OffsetB = 0;
618 int WidthA = 0, WidthB = 0;
619
620 assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
621 assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");
622
623 if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
624 MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
625 return false;
626
627 // Retrieve the base register, offset from the base register and width. Width
628 // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
629 // base registers are identical, and the offset of a lower memory access +
630 // the width doesn't overlap the offset of a higher memory access,
631 // then the memory accesses are different.
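 // e.g. "ldr x0, [x20, #8]" (width 8) and "str x1, [x20, #16]" share the
 // base register x20, but 8 + 8 <= 16, so the accesses cannot overlap.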
632 if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
633 getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
634 if (BaseRegA == BaseRegB) {
635 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
636 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
637 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
638 if (LowOffset + LowWidth <= HighOffset)
639 return true;
640 }
641 }
642 return false;
643}
644
645/// analyzeCompare - For a comparison instruction, return the source registers
646/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
647/// Return true if the comparison instruction can be analyzed.
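/// e.g. "subs w0, w1, w2" yields SrcReg = w1, SrcReg2 = w2, CmpValue = 0,
/// while "subs w0, w1, #4" yields SrcReg = w1, SrcReg2 = 0 and CmpValue = 1
/// (immediates are collapsed to 0 or 1; see the FIXME below).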
648bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
649 unsigned &SrcReg2, int &CmpMask,
650 int &CmpValue) const {
651 switch (MI->getOpcode()) {
652 default:
653 break;
654 case AArch64::SUBSWrr:
655 case AArch64::SUBSWrs:
656 case AArch64::SUBSWrx:
657 case AArch64::SUBSXrr:
658 case AArch64::SUBSXrs:
659 case AArch64::SUBSXrx:
660 case AArch64::ADDSWrr:
661 case AArch64::ADDSWrs:
662 case AArch64::ADDSWrx:
663 case AArch64::ADDSXrr:
664 case AArch64::ADDSXrs:
665 case AArch64::ADDSXrx:
666 // Replace SUBSWrr with SUBWrr if NZCV is not used.
667 SrcReg = MI->getOperand(1).getReg();
668 SrcReg2 = MI->getOperand(2).getReg();
669 CmpMask = ~0;
670 CmpValue = 0;
671 return true;
672 case AArch64::SUBSWri:
673 case AArch64::ADDSWri:
674 case AArch64::SUBSXri:
675 case AArch64::ADDSXri:
676 SrcReg = MI->getOperand(1).getReg();
677 SrcReg2 = 0;
678 CmpMask = ~0;
679 // FIXME: CmpValue is reduced to 0 or 1 here (see optimizeCompareInstr).
680 CmpValue = (MI->getOperand(2).getImm() != 0);
681 return true;
682 case AArch64::ANDSWri:
683 case AArch64::ANDSXri:
684 // ANDS does not use the same encoding scheme as the other xxxS
685 // instructions.
686 SrcReg = MI->getOperand(1).getReg();
687 SrcReg2 = 0;
688 CmpMask = ~0;
689 // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
690 // while the type of CmpValue is int. When converting uint64_t to int,
691 // the high 32 bits of uint64_t will be lost.
692 // In fact it causes a bug in spec2006-483.xalancbmk
693 // CmpValue is only used to compare with zero in OptimizeCompareInstr
694 CmpValue = (AArch64_AM::decodeLogicalImmediate(
695 MI->getOperand(2).getImm(),
696 MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
697 return true;
698 }
699
700 return false;
701}
702
703static bool UpdateOperandRegClass(MachineInstr *Instr) {
704 MachineBasicBlock *MBB = Instr->getParent();
705 assert(MBB && "Can't get MachineBasicBlock here");
706 MachineFunction *MF = MBB->getParent();
707 assert(MF && "Can't get MachineFunction here");
708 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
709 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
710 MachineRegisterInfo *MRI = &MF->getRegInfo();
711
712 for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
713 ++OpIdx) {
714 MachineOperand &MO = Instr->getOperand(OpIdx);
715 const TargetRegisterClass *OpRegCstraints =
716 Instr->getRegClassConstraint(OpIdx, TII, TRI);
717
718 // If there's no constraint, there's nothing to do.
719 if (!OpRegCstraints)
720 continue;
721 // If the operand is a frame index, there's nothing to do here.
722 // A frame index operand will resolve correctly during PEI.
723 if (MO.isFI())
724 continue;
725
726 assert(MO.isReg() &&
727 "Operand has register constraints without being a register!");
728
729 unsigned Reg = MO.getReg();
730 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
731 if (!OpRegCstraints->contains(Reg))
732 return false;
733 } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
734 !MRI->constrainRegClass(Reg, OpRegCstraints))
735 return false;
736 }
737
738 return true;
739}
740
741/// \brief Return the opcode that does not set flags when possible - otherwise
742/// return the original opcode. The caller is responsible for doing the actual
743/// substitution and legality checking.
744static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
745 // Don't convert all compare instructions, because for some the zero register
746 // encoding becomes the sp register.
747 bool MIDefinesZeroReg = false;
748 if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
749 MIDefinesZeroReg = true;
750
751 switch (MI->getOpcode()) {
752 default:
753 return MI->getOpcode();
754 case AArch64::ADDSWrr:
755 return AArch64::ADDWrr;
756 case AArch64::ADDSWri:
757 return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
758 case AArch64::ADDSWrs:
759 return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
760 case AArch64::ADDSWrx:
761 return AArch64::ADDWrx;
762 case AArch64::ADDSXrr:
763 return AArch64::ADDXrr;
764 case AArch64::ADDSXri:
765 return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
766 case AArch64::ADDSXrs:
767 return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
768 case AArch64::ADDSXrx:
769 return AArch64::ADDXrx;
770 case AArch64::SUBSWrr:
771 return AArch64::SUBWrr;
772 case AArch64::SUBSWri:
773 return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
774 case AArch64::SUBSWrs:
775 return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
776 case AArch64::SUBSWrx:
777 return AArch64::SUBWrx;
778 case AArch64::SUBSXrr:
779 return AArch64::SUBXrr;
780 case AArch64::SUBSXri:
781 return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
782 case AArch64::SUBSXrs:
783 return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
784 case AArch64::SUBSXrx:
785 return AArch64::SUBXrx;
786 }
787}
788
789/// True when the condition code could be modified on the instruction
790/// trace starting at \p From and ending at \p To.
791static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
792 const bool CheckOnlyCCWrites,
793 const TargetRegisterInfo *TRI) {
794 // We iterate backward starting at \p To until we hit \p From.
795 MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();
796
797 // Early exit if To is at the beginning of the BB.
798 if (I == B)
799 return true;
800
801 // Check whether the definition of SrcReg is in the same basic block as
802 // Compare. If not, assume the condition code gets modified on some path.
803 if (To->getParent() != From->getParent())
804 return true;
805
806 // Check that NZCV isn't set on the trace.
807 for (--I; I != E; --I) {
808 const MachineInstr &Instr = *I;
809
810 if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
811 (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
812 // This instruction modifies or uses NZCV after the one we want to
813 // change.
814 return true;
815 if (I == B)
816 // We currently don't allow the instruction trace to cross basic
817 // block boundaries
818 return true;
819 }
820 return false;
821}
822/// optimizeCompareInstr - Convert the instruction supplying the argument to the
823/// comparison into one that sets the zero bit in the flags register.
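/// e.g. given "w0 = sub w1, w2; ...; cmp w0, #0; b.eq", the cmp can be removed
/// by turning the sub into "subs w0, w1, w2", provided nothing in between
/// writes NZCV and no user of the flags needs the overflow (V) bit.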
824bool AArch64InstrInfo::optimizeCompareInstr(
825 MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
826 int CmpValue, const MachineRegisterInfo *MRI) const {
827
828 // Replace SUBSWrr with SUBWrr if NZCV is not used.
829 int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
830 if (Cmp_NZCV != -1) {
831 if (CmpInstr->definesRegister(AArch64::WZR) ||
832 CmpInstr->definesRegister(AArch64::XZR)) {
833 CmpInstr->eraseFromParent();
834 return true;
835 }
836 unsigned Opc = CmpInstr->getOpcode();
837 unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
838 if (NewOpc == Opc)
839 return false;
840 const MCInstrDesc &MCID = get(NewOpc);
841 CmpInstr->setDesc(MCID);
842 CmpInstr->RemoveOperand(Cmp_NZCV);
843 bool succeeded = UpdateOperandRegClass(CmpInstr);
844 (void)succeeded;
845 assert(succeeded && "Some operands reg class are incompatible!");
846 return true;
847 }
848
849 // Continue only if we have a "ri" where immediate is zero.
850 // FIXME: CmpValue has already been converted to 0 or 1 in the
851 // analyzeCompare function.
852 assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
853 if (CmpValue != 0 || SrcReg2 != 0)
854 return false;
855
856 // CmpInstr is a Compare instruction if destination register is not used.
857 if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
858 return false;
859
860 // Get the unique definition of SrcReg.
861 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
862 if (!MI)
863 return false;
864
865 bool CheckOnlyCCWrites = false;
866 const TargetRegisterInfo *TRI = &getRegisterInfo();
867 if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
868 return false;
869
870 unsigned NewOpc = MI->getOpcode();
871 switch (MI->getOpcode()) {
872 default:
873 return false;
874 case AArch64::ADDSWrr:
875 case AArch64::ADDSWri:
876 case AArch64::ADDSXrr:
877 case AArch64::ADDSXri:
878 case AArch64::SUBSWrr:
879 case AArch64::SUBSWri:
880 case AArch64::SUBSXrr:
881 case AArch64::SUBSXri:
882 break;
883 case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
884 case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
885 case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
886 case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
887 case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
888 case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
889 case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
890 case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
891 case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
892 case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
893 case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
894 case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
895 case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
896 case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
897 }
898
899 // Scan forward for the use of NZCV.
900 // When checking against MI: if the condition code requires checking of
901 // the V bit, then this is not safe to do.
902 // It is safe to remove CmpInstr if NZCV is redefined or killed.
903 // If we are done with the basic block, we need to check whether NZCV is
904 // live-out.
905 bool IsSafe = false;
906 for (MachineBasicBlock::iterator I = CmpInstr,
907 E = CmpInstr->getParent()->end();
908 !IsSafe && ++I != E;) {
909 const MachineInstr &Instr = *I;
910 for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
911 ++IO) {
912 const MachineOperand &MO = Instr.getOperand(IO);
913 if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
914 IsSafe = true;
915 break;
916 }
917 if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
918 continue;
919 if (MO.isDef()) {
920 IsSafe = true;
921 break;
922 }
923
924 // Decode the condition code.
925 unsigned Opc = Instr.getOpcode();
926 AArch64CC::CondCode CC;
927 switch (Opc) {
928 default:
929 return false;
930 case AArch64::Bcc:
931 CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
932 break;
933 case AArch64::CSINVWr:
934 case AArch64::CSINVXr:
935 case AArch64::CSINCWr:
936 case AArch64::CSINCXr:
937 case AArch64::CSELWr:
938 case AArch64::CSELXr:
939 case AArch64::CSNEGWr:
940 case AArch64::CSNEGXr:
941 case AArch64::FCSELSrrr:
942 case AArch64::FCSELDrrr:
943 CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
944 break;
945 }
946
947 // It is not safe to remove Compare instruction if Overflow(V) is used.
948 switch (CC) {
949 default:
950 // NZCV can be used multiple times, we should continue.
951 break;
952 case AArch64CC::VS:
953 case AArch64CC::VC:
954 case AArch64CC::GE:
955 case AArch64CC::LT:
956 case AArch64CC::GT:
957 case AArch64CC::LE:
958 return false;
959 }
960 }
961 }
962
963 // If NZCV is neither killed nor re-defined, we should check whether it is
964 // live-out. If it is live-out, do not optimize.
965 if (!IsSafe) {
966 MachineBasicBlock *ParentBlock = CmpInstr->getParent();
967 for (auto *MBB : ParentBlock->successors())
968 if (MBB->isLiveIn(AArch64::NZCV))
969 return false;
970 }
971
972 // Update the instruction to set NZCV.
973 MI->setDesc(get(NewOpc));
974 CmpInstr->eraseFromParent();
975 bool succeeded = UpdateOperandRegClass(MI);
976 (void)succeeded;
977 assert(succeeded && "Some operands reg class are incompatible!");
978 MI->addRegisterDefined(AArch64::NZCV, TRI);
979 return true;
980}
981
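// Expand the LOAD_STACK_GUARD pseudo: materialise the address of the stack
// guard global and load the guard value through it, using a GOT load, a
// movz/movk sequence (large code model), or an adrp + ldr pair, depending on
// how the global reference is classified.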
982bool
983AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
984 if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
985 return false;
986
987 MachineBasicBlock &MBB = *MI->getParent();
988 DebugLoc DL = MI->getDebugLoc();
989 unsigned Reg = MI->getOperand(0).getReg();
990 const GlobalValue *GV =
991 cast<GlobalValue>((*MI->memoperands_begin())->getValue());
992 const TargetMachine &TM = MBB.getParent()->getTarget();
993 unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
994 const unsigned char MO_NC = AArch64II::MO_NC;
995
996 if ((OpFlags & AArch64II::MO_GOT) != 0) {
997 BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
998 .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
999 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1000 .addReg(Reg, RegState::Kill).addImm(0)
1001 .addMemOperand(*MI->memoperands_begin());
1002 } else if (TM.getCodeModel() == CodeModel::Large) {
1003 BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1004 .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
1005 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1006 .addReg(Reg, RegState::Kill)
1007 .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
1008 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1009 .addReg(Reg, RegState::Kill)
1010 .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
1011 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1012 .addReg(Reg, RegState::Kill)
1013 .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
1014 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1015 .addReg(Reg, RegState::Kill).addImm(0)
1016 .addMemOperand(*MI->memoperands_begin());
1017 } else {
1018 BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
1019 .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
1020 unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
1021 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1022 .addReg(Reg, RegState::Kill)
1023 .addGlobalAddress(GV, 0, LoFlags)
1024 .addMemOperand(*MI->memoperands_begin());
1025 }
1026
1027 MBB.erase(MI);
1028
1029 return true;
1030}
1031
1032/// Return true if this instruction has a non-zero shift immediate.
1033bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
1034 switch (MI->getOpcode()) {
1035 default:
1036 break;
1037 case AArch64::ADDSWrs:
1038 case AArch64::ADDSXrs:
1039 case AArch64::ADDWrs:
1040 case AArch64::ADDXrs:
1041 case AArch64::ANDSWrs:
1042 case AArch64::ANDSXrs:
1043 case AArch64::ANDWrs:
1044 case AArch64::ANDXrs:
1045 case AArch64::BICSWrs:
1046 case AArch64::BICSXrs:
1047 case AArch64::BICWrs:
1048 case AArch64::BICXrs:
1049 case AArch64::CRC32Brr:
1050 case AArch64::CRC32CBrr:
1051 case AArch64::CRC32CHrr:
1052 case AArch64::CRC32CWrr:
1053 case AArch64::CRC32CXrr:
1054 case AArch64::CRC32Hrr:
1055 case AArch64::CRC32Wrr:
1056 case AArch64::CRC32Xrr:
1057 case AArch64::EONWrs:
1058 case AArch64::EONXrs:
1059 case AArch64::EORWrs:
1060 case AArch64::EORXrs:
1061 case AArch64::ORNWrs:
1062 case AArch64::ORNXrs:
1063 case AArch64::ORRWrs:
1064 case AArch64::ORRXrs:
1065 case AArch64::SUBSWrs:
1066 case AArch64::SUBSXrs:
1067 case AArch64::SUBWrs:
1068 case AArch64::SUBXrs:
1069 if (MI->getOperand(3).isImm()) {
1070 unsigned val = MI->getOperand(3).getImm();
1071 return (val != 0);
1072 }
1073 break;
1074 }
1075 return false;
1076}
1077
1078/// Return true if this instruction has a non-zero extend immediate.
1079bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
1080 switch (MI->getOpcode()) {
1081 default:
1082 break;
1083 case AArch64::ADDSWrx:
1084 case AArch64::ADDSXrx:
1085 case AArch64::ADDSXrx64:
1086 case AArch64::ADDWrx:
1087 case AArch64::ADDXrx:
1088 case AArch64::ADDXrx64:
1089 case AArch64::SUBSWrx:
1090 case AArch64::SUBSXrx:
1091 case AArch64::SUBSXrx64:
1092 case AArch64::SUBWrx:
1093 case AArch64::SUBXrx:
1094 case AArch64::SUBXrx64:
1095 if (MI->getOperand(3).isImm()) {
1096 unsigned val = MI->getOperand(3).getImm();
1097 return (val != 0);
1098 }
1099 break;
1100 }
1101
1102 return false;
1103}
1104
1105// Return true if this instruction simply sets its single destination register
1106// to zero. This is equivalent to a register rename of the zero-register.
1107bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
1108 switch (MI->getOpcode()) {
1109 default:
1110 break;
1111 case AArch64::MOVZWi:
1112 case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
1113 if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
1114 assert(MI->getDesc().getNumOperands() == 3 &&
1115 MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
1116 return true;
1117 }
1118 break;
1119 case AArch64::ANDWri: // and Rd, Rzr, #imm
1120 return MI->getOperand(1).getReg() == AArch64::WZR;
1121 case AArch64::ANDXri:
1122 return MI->getOperand(1).getReg() == AArch64::XZR;
1123 case TargetOpcode::COPY:
1124 return MI->getOperand(1).getReg() == AArch64::WZR;
1125 }
1126 return false;
1127}
1128
1129// Return true if this instruction simply renames a general register without
1130// modifying bits.
1131bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
1132 switch (MI->getOpcode()) {
1133 default:
1134 break;
1135 case TargetOpcode::COPY: {
1136 // GPR32 copies will be lowered to ORRXrs
1137 unsigned DstReg = MI->getOperand(0).getReg();
1138 return (AArch64::GPR32RegClass.contains(DstReg) ||
1139 AArch64::GPR64RegClass.contains(DstReg));
1140 }
1141 case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
1142 if (MI->getOperand(1).getReg() == AArch64::XZR) {
1143 assert(MI->getDesc().getNumOperands() == 4 &&
1144 MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
1145 return true;
1146 }
1147 break;
1148 case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
1149 if (MI->getOperand(2).getImm() == 0) {
1150 assert(MI->getDesc().getNumOperands() == 4 &&
1151 MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
1152 return true;
1153 }
1154 break;
1155 }
1156 return false;
1157}
1158
1159 // Return true if this instruction simply renames a floating-point register
1160 // without modifying bits.
1161bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
1162 switch (MI->getOpcode()) {
1163 default:
1164 break;
1165 case TargetOpcode::COPY: {
1166 // FPR64 copies will be lowered to ORR.16b
1167 unsigned DstReg = MI->getOperand(0).getReg();
1168 return (AArch64::FPR64RegClass.contains(DstReg) ||
1169 AArch64::FPR128RegClass.contains(DstReg));
1170 }
1171 case AArch64::ORRv16i8:
1172 if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
1173 assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
1174 "invalid ORRv16i8 operands");
1175 return true;
1176 }
1177 break;
1178 }
1179 return false;
1180}
1181
1182unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
1183 int &FrameIndex) const {
1184 switch (MI->getOpcode()) {
1185 default:
1186 break;
1187 case AArch64::LDRWui:
1188 case AArch64::LDRXui:
1189 case AArch64::LDRBui:
1190 case AArch64::LDRHui:
1191 case AArch64::LDRSui:
1192 case AArch64::LDRDui:
1193 case AArch64::LDRQui:
1194 if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
1195 MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
1196 FrameIndex = MI->getOperand(1).getIndex();
1197 return MI->getOperand(0).getReg();
1198 }
1199 break;
1200 }
1201
1202 return 0;
1203}
1204
1205unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
1206 int &FrameIndex) const {
1207 switch (MI->getOpcode()) {
1208 default:
1209 break;
1210 case AArch64::STRWui:
1211 case AArch64::STRXui:
1212 case AArch64::STRBui:
1213 case AArch64::STRHui:
1214 case AArch64::STRSui:
1215 case AArch64::STRDui:
1216 case AArch64::STRQui:
1217 if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
1218 MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
1219 FrameIndex = MI->getOperand(1).getIndex();
1220 return MI->getOperand(0).getReg();
1221 }
1222 break;
1223 }
1224 return 0;
1225}
1226
1227/// Return true if this load/store scales or extends its register offset.
1228/// This refers to scaling a dynamic index as opposed to scaled immediates.
1229/// MI should be a memory op that allows scaled addressing.
1230bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
1231 switch (MI->getOpcode()) {
1232 default:
1233 break;
1234 case AArch64::LDRBBroW:
1235 case AArch64::LDRBroW:
1236 case AArch64::LDRDroW:
1237 case AArch64::LDRHHroW:
1238 case AArch64::LDRHroW:
1239 case AArch64::LDRQroW:
1240 case AArch64::LDRSBWroW:
1241 case AArch64::LDRSBXroW:
1242 case AArch64::LDRSHWroW:
1243 case AArch64::LDRSHXroW:
1244 case AArch64::LDRSWroW:
1245 case AArch64::LDRSroW:
1246 case AArch64::LDRWroW:
1247 case AArch64::LDRXroW:
1248 case AArch64::STRBBroW:
1249 case AArch64::STRBroW:
1250 case AArch64::STRDroW:
1251 case AArch64::STRHHroW:
1252 case AArch64::STRHroW:
1253 case AArch64::STRQroW:
1254 case AArch64::STRSroW:
1255 case AArch64::STRWroW:
1256 case AArch64::STRXroW:
1257 case AArch64::LDRBBroX:
1258 case AArch64::LDRBroX:
1259 case AArch64::LDRDroX:
1260 case AArch64::LDRHHroX:
1261 case AArch64::LDRHroX:
1262 case AArch64::LDRQroX:
1263 case AArch64::LDRSBWroX:
1264 case AArch64::LDRSBXroX:
1265 case AArch64::LDRSHWroX:
1266 case AArch64::LDRSHXroX:
1267 case AArch64::LDRSWroX:
1268 case AArch64::LDRSroX:
1269 case AArch64::LDRWroX:
1270 case AArch64::LDRXroX:
1271 case AArch64::STRBBroX:
1272 case AArch64::STRBroX:
1273 case AArch64::STRDroX:
1274 case AArch64::STRHHroX:
1275 case AArch64::STRHroX:
1276 case AArch64::STRQroX:
1277 case AArch64::STRSroX:
1278 case AArch64::STRWroX:
1279 case AArch64::STRXroX:
1280
1281 unsigned Val = MI->getOperand(3).getImm();
1282 AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
1283 return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
1284 }
1285 return false;
1286}
1287
1288/// Check all MachineMemOperands for a hint to suppress pairing.
1289bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
1290 assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
1291 "Too many target MO flags");
1292 for (auto *MM : MI->memoperands()) {
1293 if (MM->getFlags() &
1294 (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
1295 return true;
1296 }
1297 }
1298 return false;
1299}
1300
1301/// Set a flag on the first MachineMemOperand to suppress pairing.
1302void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
1303 if (MI->memoperands_empty())
1304 return;
1305
1306 assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
1307 "Too many target MO flags");
1308 (*MI->memoperands_begin())
1309 ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
1310}
1311
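// Decompose a scaled load/store into its base register and byte offset; e.g.
// an LDRXui with immediate operand 2 yields the base register plus
// Offset = 2 * 8 = 16 bytes, since the unsigned-offset forms encode the
// offset in units of the access width.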
1312bool
1313AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
1314 unsigned &Offset,
1315 const TargetRegisterInfo *TRI) const {
1316 switch (LdSt->getOpcode()) {
1317 default:
1318 return false;
1319 case AArch64::STRSui:
1320 case AArch64::STRDui:
1321 case AArch64::STRQui:
1322 case AArch64::STRXui:
1323 case AArch64::STRWui:
1324 case AArch64::LDRSui:
1325 case AArch64::LDRDui:
1326 case AArch64::LDRQui:
1327 case AArch64::LDRXui:
1328 case AArch64::LDRWui:
1329 if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
1330 return false;
1331 BaseReg = LdSt->getOperand(1).getReg();
1332 MachineFunction &MF = *LdSt->getParent()->getParent();
1333 unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
1334 Offset = LdSt->getOperand(2).getImm() * Width;
1335 return true;
1336 };
1337}
1338
1339bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
1340 MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
1341 const TargetRegisterInfo *TRI) const {
1342 // Handle only loads/stores with base register followed by immediate offset.
1343 if (LdSt->getNumOperands() != 3)
1344 return false;
1345 if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
1346 return false;
1347
1348 // Offset is calculated as the immediate operand multiplied by the scaling factor.
1349 // Unscaled instructions have scaling factor set to 1.
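 // e.g. the unscaled LDURXi with immediate 4 yields Offset = 4 bytes, while
 // the scaled LDRXui with immediate 4 yields Offset = 4 * 8 = 32 bytes.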
1350 int Scale = 0;
1351 switch (LdSt->getOpcode()) {
1352 default:
1353 return false;
1354 case AArch64::LDURQi:
1355 case AArch64::STURQi:
1356 Width = 16;
1357 Scale = 1;
1358 break;
1359 case AArch64::LDURXi:
1360 case AArch64::LDURDi:
1361 case AArch64::STURXi:
1362 case AArch64::STURDi:
1363 Width = 8;
1364 Scale = 1;
1365 break;
1366 case AArch64::LDURWi:
1367 case AArch64::LDURSi:
1368 case AArch64::LDURSWi:
1369 case AArch64::STURWi:
1370 case AArch64::STURSi:
1371 Width = 4;
1372 Scale = 1;
1373 break;
1374 case AArch64::LDURHi:
1375 case AArch64::LDURHHi:
1376 case AArch64::LDURSHXi:
1377 case AArch64::LDURSHWi:
1378 case AArch64::STURHi:
1379 case AArch64::STURHHi:
1380 Width = 2;
1381 Scale = 1;
1382 break;
1383 case AArch64::LDURBi:
1384 case AArch64::LDURBBi:
1385 case AArch64::LDURSBXi:
1386 case AArch64::LDURSBWi:
1387 case AArch64::STURBi:
1388 case AArch64::STURBBi:
1389 Width = 1;
1390 Scale = 1;
1391 break;
1392 case AArch64::LDRXui:
1393 case AArch64::STRXui:
1394 Scale = Width = 8;
1395 break;
1396 case AArch64::LDRWui:
1397 case AArch64::STRWui:
1398 Scale = Width = 4;
1399 break;
1400 case AArch64::LDRBui:
1401 case AArch64::STRBui:
1402 Scale = Width = 1;
1403 break;
1404 case AArch64::LDRHui:
1405 case AArch64::STRHui:
1406 Scale = Width = 2;
1407 break;
1408 case AArch64::LDRSui:
1409 case AArch64::STRSui:
1410 Scale = Width = 4;
1411 break;
1412 case AArch64::LDRDui:
1413 case AArch64::STRDui:
1414 Scale = Width = 8;
1415 break;
1416 case AArch64::LDRQui:
1417 case AArch64::STRQui:
1418 Scale = Width = 16;
1419 break;
1420 case AArch64::LDRBBui:
1421 case AArch64::STRBBui:
1422 Scale = Width = 1;
1423 break;
1424 case AArch64::LDRHHui:
1425 case AArch64::STRHHui:
1426 Scale = Width = 2;
1427 break;
1428 };
1429
1430 BaseReg = LdSt->getOperand(1).getReg();
1431 Offset = LdSt->getOperand(2).getImm() * Scale;
1432 return true;
1433}
1434
1435/// Detect opportunities for ldp/stp formation.
1436///
1437/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
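/// e.g. two LDRXui loads with immediate offsets 2 and 3 (byte offsets 16 and
/// 24) are clustered so the scheduler keeps them adjacent for ldp formation.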
1438bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
1439 MachineInstr *SecondLdSt,
1440 unsigned NumLoads) const {
1441 // Only cluster up to a single pair.
1442 if (NumLoads > 1)
1443 return false;
1444 if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
1445 return false;
1446 // getLdStBaseRegImmOfs guarantees that operand 2 is an immediate.
1447 unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
1448 // Allow 6 bits of positive range.
1449 if (Ofs1 > 64)
1450 return false;
1451 // The caller should already have ordered First/SecondLdSt by offset.
1452 unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
1453 return Ofs1 + 1 == Ofs2;
1454}
1455
1456bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
1457 MachineInstr *Second) const {
1458 // Cyclone can fuse CMN, CMP followed by Bcc.
1459
1460 // FIXME: B0 can also fuse:
1461 // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
1462 if (Second->getOpcode() != AArch64::Bcc)
1463 return false;
1464 switch (First->getOpcode()) {
1465 default:
1466 return false;
1467 case AArch64::SUBSWri:
1468 case AArch64::ADDSWri:
1469 case AArch64::ANDSWri:
1470 case AArch64::SUBSXri:
1471 case AArch64::ADDSXri:
1472 case AArch64::ANDSXri:
1473 return true;
1474 }
1475}
1476
1477MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
1478 MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
1479 const MDNode *Expr, DebugLoc DL) const {
1480 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
1481 .addFrameIndex(FrameIx)
1482 .addImm(0)
1483 .addImm(Offset)
1484 .addMetadata(Var)
1485 .addMetadata(Expr);
1486 return &*MIB;
1487}
1488
1489static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
1490 unsigned Reg, unsigned SubIdx,
1491 unsigned State,
1492 const TargetRegisterInfo *TRI) {
1493 if (!SubIdx)
1494 return MIB.addReg(Reg, State);
1495
1496 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1497 return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
1498 return MIB.addReg(Reg, State, SubIdx);
1499}
1500
1501static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
1502 unsigned NumRegs) {
1503 // We really want the positive remainder mod 32 here; that happens to be
1504 // easily obtainable with a mask.
1505 return ((DestReg - SrcReg) & 0x1f) < NumRegs;
1506}
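// e.g. copying the quad D1_D2_D3_D4 into D2_D3_D4_D5 forwards would overwrite
// D2 before it is read, so copyPhysRegTuple below walks the sub-registers in
// reverse whenever the destination overlaps the source like this.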
1507
1508void AArch64InstrInfo::copyPhysRegTuple(
1509 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
1510 unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
1511 llvm::ArrayRef<unsigned> Indices) const {
1512 assert(Subtarget.hasNEON() &&
1513 "Unexpected register copy without NEON");
1514 const TargetRegisterInfo *TRI = &getRegisterInfo();
1515 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
1516 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
1517 unsigned NumRegs = Indices.size();
1518
1519 int SubReg = 0, End = NumRegs, Incr = 1;
1520 if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
1521 SubReg = NumRegs - 1;
1522 End = -1;
1523 Incr = -1;
1524 }
1525
1526 for (; SubReg != End; SubReg += Incr) {
1527 const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
1528 AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
1529 AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
1530 AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
1531 }
1532}
1533
1534void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
1535 MachineBasicBlock::iterator I, DebugLoc DL,
1536 unsigned DestReg, unsigned SrcReg,
1537 bool KillSrc) const {
1538 if (AArch64::GPR32spRegClass.contains(DestReg) &&
1539 (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
1540 const TargetRegisterInfo *TRI = &getRegisterInfo();
1541
1542 if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
1543 // If either operand is WSP, expand to ADD #0.
1544 if (Subtarget.hasZeroCycleRegMove()) {
1545 // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
1546 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
1547 &AArch64::GPR64spRegClass);
1548 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
1549 &AArch64::GPR64spRegClass);
1550 // This instruction is reading and writing X registers. This may upset
1551 // the register scavenger and machine verifier, so we need to indicate
1552 // that we are reading an undefined value from SrcRegX, but a proper
1553 // value from SrcReg.
1554 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
1555 .addReg(SrcRegX, RegState::Undef)
1556 .addImm(0)
1557 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
1558 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
1559 } else {
1560 BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
1561 .addReg(SrcReg, getKillRegState(KillSrc))
1562 .addImm(0)
1563 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1564 }
1565 } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
1566 BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
1567 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1568 } else {
1569 if (Subtarget.hasZeroCycleRegMove()) {
1570 // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
1571 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
1572 &AArch64::GPR64spRegClass);
1573 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
1574 &AArch64::GPR64spRegClass);
1575 // This instruction is reading and writing X registers. This may upset
1576 // the register scavenger and machine verifier, so we need to indicate
1577 // that we are reading an undefined value from SrcRegX, but a proper
1578 // value from SrcReg.
1579 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
1580 .addReg(AArch64::XZR)
1581 .addReg(SrcRegX, RegState::Undef)
1582 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
1583 } else {
1584 // Otherwise, expand to ORR WZR.
1585 BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
1586 .addReg(AArch64::WZR)
1587 .addReg(SrcReg, getKillRegState(KillSrc));
1588 }
1589 }
1590 return;
1591 }
1592
1593 if (AArch64::GPR64spRegClass.contains(DestReg) &&
1594 (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
1595 if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
1596 // If either operand is SP, expand to ADD #0.
1597 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
1598 .addReg(SrcReg, getKillRegState(KillSrc))
1599 .addImm(0)
1600 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1601 } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
1602 BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
1603 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
1604 } else {
1605 // Otherwise, expand to ORR XZR.
1606 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
1607 .addReg(AArch64::XZR)
1608 .addReg(SrcReg, getKillRegState(KillSrc));
1609 }
1610 return;
1611 }
1612
1613 // Copy a DDDD register quad by copying the individual sub-registers.
1614 if (AArch64::DDDDRegClass.contains(DestReg) &&
1615 AArch64::DDDDRegClass.contains(SrcReg)) {
1616 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
1617 AArch64::dsub2, AArch64::dsub3 };
1618 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
1619 Indices);
1620 return;
1621 }
1622
1623 // Copy a DDD register triple by copying the individual sub-registers.
1624 if (AArch64::DDDRegClass.contains(DestReg) &&
1625 AArch64::DDDRegClass.contains(SrcReg)) {
1626 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
1627 AArch64::dsub2 };
1628 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
1629 Indices);
1630 return;
1631 }
1632
1633 // Copy a DD register pair by copying the individual sub-registers.
1634 if (AArch64::DDRegClass.contains(DestReg) &&
1635 AArch64::DDRegClass.contains(SrcReg)) {
1636 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
1637 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
1638 Indices);
1639 return;
1640 }
1641
1642 // Copy a QQQQ register quad by copying the individual sub-registers.
1643 if (AArch64::QQQQRegClass.contains(DestReg) &&
1644 AArch64::QQQQRegClass.contains(SrcReg)) {
1645 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
1646 AArch64::qsub2, AArch64::qsub3 };
1647 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
1648 Indices);
1649 return;
1650 }
1651
1652 // Copy a QQQ register triple by copying the individual sub-registers.
1653 if (AArch64::QQQRegClass.contains(DestReg) &&
1654 AArch64::QQQRegClass.contains(SrcReg)) {
1655 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
1656 AArch64::qsub2 };
1657 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
1658 Indices);
1659 return;
1660 }
1661
1662 // Copy a QQ register pair by copying the individual sub-registers.
1663 if (AArch64::QQRegClass.contains(DestReg) &&
1664 AArch64::QQRegClass.contains(SrcReg)) {
1665 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
1666 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
1667 Indices);
1668 return;
1669 }
1670
1671 if (AArch64::FPR128RegClass.contains(DestReg) &&
1672 AArch64::FPR128RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001673 if (Subtarget.hasNEON()) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001674 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1675 .addReg(SrcReg)
1676 .addReg(SrcReg, getKillRegState(KillSrc));
1677 } else {
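// Without NEON there is no full 128-bit register-to-register move, so
// expand the copy into a store/reload through a 16-byte scratch slot on
// the stack.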
1678 BuildMI(MBB, I, DL, get(AArch64::STRQpre))
1679 .addReg(AArch64::SP, RegState::Define)
1680 .addReg(SrcReg, getKillRegState(KillSrc))
1681 .addReg(AArch64::SP)
1682 .addImm(-16);
1683 BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
1684 .addReg(AArch64::SP, RegState::Define)
1685 .addReg(DestReg, RegState::Define)
1686 .addReg(AArch64::SP)
1687 .addImm(16);
1688 }
1689 return;
1690 }
1691
1692 if (AArch64::FPR64RegClass.contains(DestReg) &&
1693 AArch64::FPR64RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001694 if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001695 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
1696 &AArch64::FPR128RegClass);
1697 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
1698 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001699 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1700 .addReg(SrcReg)
1701 .addReg(SrcReg, getKillRegState(KillSrc));
1702 } else {
1703 BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
1704 .addReg(SrcReg, getKillRegState(KillSrc));
1705 }
1706 return;
1707 }
1708
1709 if (AArch64::FPR32RegClass.contains(DestReg) &&
1710 AArch64::FPR32RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001711 if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001712 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
1713 &AArch64::FPR128RegClass);
1714 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
1715 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001716 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1717 .addReg(SrcReg)
1718 .addReg(SrcReg, getKillRegState(KillSrc));
1719 } else {
1720 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
1721 .addReg(SrcReg, getKillRegState(KillSrc));
1722 }
1723 return;
1724 }
1725
1726 if (AArch64::FPR16RegClass.contains(DestReg) &&
1727 AArch64::FPR16RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001728 if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001729 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
1730 &AArch64::FPR128RegClass);
1731 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
1732 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001733 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1734 .addReg(SrcReg)
1735 .addReg(SrcReg, getKillRegState(KillSrc));
1736 } else {
Eric Christophera0de2532015-03-18 20:37:30 +00001737 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
1738 &AArch64::FPR32RegClass);
1739 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
1740 &AArch64::FPR32RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001741 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
1742 .addReg(SrcReg, getKillRegState(KillSrc));
1743 }
1744 return;
1745 }
1746
1747 if (AArch64::FPR8RegClass.contains(DestReg) &&
1748 AArch64::FPR8RegClass.contains(SrcReg)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001749 if (Subtarget.hasNEON()) {
Eric Christophera0de2532015-03-18 20:37:30 +00001750 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
Tim Northover3b0846e2014-05-24 12:50:23 +00001751 &AArch64::FPR128RegClass);
Eric Christophera0de2532015-03-18 20:37:30 +00001752 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
1753 &AArch64::FPR128RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001754 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
1755 .addReg(SrcReg)
1756 .addReg(SrcReg, getKillRegState(KillSrc));
1757 } else {
Eric Christophera0de2532015-03-18 20:37:30 +00001758 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
1759 &AArch64::FPR32RegClass);
1760 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
1761 &AArch64::FPR32RegClass);
Tim Northover3b0846e2014-05-24 12:50:23 +00001762 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
1763 .addReg(SrcReg, getKillRegState(KillSrc));
1764 }
1765 return;
1766 }
1767
1768 // Copies between GPR64 and FPR64.
1769 if (AArch64::FPR64RegClass.contains(DestReg) &&
1770 AArch64::GPR64RegClass.contains(SrcReg)) {
1771 BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
1772 .addReg(SrcReg, getKillRegState(KillSrc));
1773 return;
1774 }
1775 if (AArch64::GPR64RegClass.contains(DestReg) &&
1776 AArch64::FPR64RegClass.contains(SrcReg)) {
1777 BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
1778 .addReg(SrcReg, getKillRegState(KillSrc));
1779 return;
1780 }
1781 // Copies between GPR32 and FPR32.
1782 if (AArch64::FPR32RegClass.contains(DestReg) &&
1783 AArch64::GPR32RegClass.contains(SrcReg)) {
1784 BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
1785 .addReg(SrcReg, getKillRegState(KillSrc));
1786 return;
1787 }
1788 if (AArch64::GPR32RegClass.contains(DestReg) &&
1789 AArch64::FPR32RegClass.contains(SrcReg)) {
1790 BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
1791 .addReg(SrcReg, getKillRegState(KillSrc));
1792 return;
1793 }
1794
Tim Northover1bed9af2014-05-27 12:16:02 +00001795 if (DestReg == AArch64::NZCV) {
1796 assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
1797 BuildMI(MBB, I, DL, get(AArch64::MSR))
1798 .addImm(AArch64SysReg::NZCV)
1799 .addReg(SrcReg, getKillRegState(KillSrc))
1800 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
1801 return;
1802 }
1803
1804 if (SrcReg == AArch64::NZCV) {
1805 assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
1806 BuildMI(MBB, I, DL, get(AArch64::MRS))
1807 .addReg(DestReg)
1808 .addImm(AArch64SysReg::NZCV)
1809 .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
1810 return;
1811 }
1812
1813 llvm_unreachable("unimplemented reg-to-reg copy");
Tim Northover3b0846e2014-05-24 12:50:23 +00001814}
1815
1816void AArch64InstrInfo::storeRegToStackSlot(
1817 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
1818 bool isKill, int FI, const TargetRegisterClass *RC,
1819 const TargetRegisterInfo *TRI) const {
1820 DebugLoc DL;
1821 if (MBBI != MBB.end())
1822 DL = MBBI->getDebugLoc();
1823 MachineFunction &MF = *MBB.getParent();
1824 MachineFrameInfo &MFI = *MF.getFrameInfo();
1825 unsigned Align = MFI.getObjectAlignment(FI);
1826
1827 MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
1828 MachineMemOperand *MMO = MF.getMachineMemOperand(
1829 PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
1830 unsigned Opc = 0;
1831 bool Offset = true;
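// Offset stays true for the plain STR* forms, which take a zero immediate
// offset; the ST1* multi-vector forms below clear it because they have no
// immediate offset operand.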
1832 switch (RC->getSize()) {
1833 case 1:
1834 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
1835 Opc = AArch64::STRBui;
1836 break;
1837 case 2:
1838 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
1839 Opc = AArch64::STRHui;
1840 break;
1841 case 4:
1842 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
1843 Opc = AArch64::STRWui;
1844 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
1845 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
1846 else
1847 assert(SrcReg != AArch64::WSP);
1848 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
1849 Opc = AArch64::STRSui;
1850 break;
1851 case 8:
1852 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
1853 Opc = AArch64::STRXui;
1854 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
1855 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
1856 else
1857 assert(SrcReg != AArch64::SP);
1858 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
1859 Opc = AArch64::STRDui;
1860 break;
1861 case 16:
1862 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
1863 Opc = AArch64::STRQui;
1864 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001865 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001866 "Unexpected register store without NEON");
1867 Opc = AArch64::ST1Twov1d, Offset = false;
1868 }
1869 break;
1870 case 24:
1871 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001872 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001873 "Unexpected register store without NEON");
1874 Opc = AArch64::ST1Threev1d, Offset = false;
1875 }
1876 break;
1877 case 32:
1878 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001879 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001880 "Unexpected register store without NEON");
1881 Opc = AArch64::ST1Fourv1d, Offset = false;
1882 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001883 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001884 "Unexpected register store without NEON");
1885 Opc = AArch64::ST1Twov2d, Offset = false;
1886 }
1887 break;
1888 case 48:
1889 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001890 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001891 "Unexpected register store without NEON");
1892 Opc = AArch64::ST1Threev2d, Offset = false;
1893 }
1894 break;
1895 case 64:
1896 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001897 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001898 "Unexpected register store without NEON");
1899 Opc = AArch64::ST1Fourv2d, Offset = false;
1900 }
1901 break;
1902 }
1903 assert(Opc && "Unknown register class");
1904
James Molloyf8aa57a2015-04-16 11:37:40 +00001905 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
Tim Northover3b0846e2014-05-24 12:50:23 +00001906 .addReg(SrcReg, getKillRegState(isKill))
1907 .addFrameIndex(FI);
1908
1909 if (Offset)
1910 MI.addImm(0);
1911 MI.addMemOperand(MMO);
1912}
1913
1914void AArch64InstrInfo::loadRegFromStackSlot(
1915 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
1916 int FI, const TargetRegisterClass *RC,
1917 const TargetRegisterInfo *TRI) const {
1918 DebugLoc DL;
1919 if (MBBI != MBB.end())
1920 DL = MBBI->getDebugLoc();
1921 MachineFunction &MF = *MBB.getParent();
1922 MachineFrameInfo &MFI = *MF.getFrameInfo();
1923 unsigned Align = MFI.getObjectAlignment(FI);
1924 MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
1925 MachineMemOperand *MMO = MF.getMachineMemOperand(
1926 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
1927
1928 unsigned Opc = 0;
1929 bool Offset = true;
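// As in storeRegToStackSlot: the LD1* multi-vector forms below clear Offset
// because they take no immediate offset operand.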
1930 switch (RC->getSize()) {
1931 case 1:
1932 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
1933 Opc = AArch64::LDRBui;
1934 break;
1935 case 2:
1936 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
1937 Opc = AArch64::LDRHui;
1938 break;
1939 case 4:
1940 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
1941 Opc = AArch64::LDRWui;
1942 if (TargetRegisterInfo::isVirtualRegister(DestReg))
1943 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
1944 else
1945 assert(DestReg != AArch64::WSP);
1946 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
1947 Opc = AArch64::LDRSui;
1948 break;
1949 case 8:
1950 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
1951 Opc = AArch64::LDRXui;
1952 if (TargetRegisterInfo::isVirtualRegister(DestReg))
1953 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
1954 else
1955 assert(DestReg != AArch64::SP);
1956 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
1957 Opc = AArch64::LDRDui;
1958 break;
1959 case 16:
1960 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
1961 Opc = AArch64::LDRQui;
1962 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001963 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001964 "Unexpected register load without NEON");
1965 Opc = AArch64::LD1Twov1d, Offset = false;
1966 }
1967 break;
1968 case 24:
1969 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001970 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001971 "Unexpected register load without NEON");
1972 Opc = AArch64::LD1Threev1d, Offset = false;
1973 }
1974 break;
1975 case 32:
1976 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001977 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001978 "Unexpected register load without NEON");
1979 Opc = AArch64::LD1Fourv1d, Offset = false;
1980 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001981 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001982 "Unexpected register load without NEON");
1983 Opc = AArch64::LD1Twov2d, Offset = false;
1984 }
1985 break;
1986 case 48:
1987 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001988 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001989 "Unexpected register load without NEON");
1990 Opc = AArch64::LD1Threev2d, Offset = false;
1991 }
1992 break;
1993 case 64:
1994 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
Eric Christopher58f32662014-06-10 22:57:21 +00001995 assert(Subtarget.hasNEON() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001996 "Unexpected register load without NEON");
1997 Opc = AArch64::LD1Fourv2d, Offset = false;
1998 }
1999 break;
2000 }
2001 assert(Opc && "Unknown register class");
2002
James Molloyf8aa57a2015-04-16 11:37:40 +00002003 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
Tim Northover3b0846e2014-05-24 12:50:23 +00002004 .addReg(DestReg, getDefRegState(true))
2005 .addFrameIndex(FI);
2006 if (Offset)
2007 MI.addImm(0);
2008 MI.addMemOperand(MMO);
2009}
2010
2011void llvm::emitFrameOffset(MachineBasicBlock &MBB,
2012 MachineBasicBlock::iterator MBBI, DebugLoc DL,
2013 unsigned DestReg, unsigned SrcReg, int Offset,
Eric Christopherbc76b972014-06-10 17:33:39 +00002014 const TargetInstrInfo *TII,
Tim Northover3b0846e2014-05-24 12:50:23 +00002015 MachineInstr::MIFlag Flag, bool SetNZCV) {
2016 if (DestReg == SrcReg && Offset == 0)
2017 return;
2018
2019 bool isSub = Offset < 0;
2020 if (isSub)
2021 Offset = -Offset;
2022
2023 // FIXME: If the offset won't fit in 24-bits, compute the offset into a
2024 // scratch register. If DestReg is a virtual register, use it as the
2025 // scratch register; otherwise, create a new virtual register (to be
2026 // replaced by the scavenger at the end of PEI). That case can be optimized
2027 // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
2028 // register can be loaded with offset%8 and the add/sub can use an extending
2029 // instruction with LSL#3.
2030 // Currently the function handles any offsets but generates a poor sequence
2031 // of code.
2032 // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
2033
2034 unsigned Opc;
2035 if (SetNZCV)
2036 Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
2037 else
2038 Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
2039 const unsigned MaxEncoding = 0xfff;
2040 const unsigned ShiftSize = 12;
2041 const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
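// Emit the offset in 12-bit chunks, shifted left by 12 where needed. For
// example, an offset of 0x123456 expands to
//   ADD Xd, Xn, #0x123, LSL #12
//   ADD Xd, Xd, #0x456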
2042 while (((unsigned)Offset) >= (1 << ShiftSize)) {
2043 unsigned ThisVal;
2044 if (((unsigned)Offset) > MaxEncodableValue) {
2045 ThisVal = MaxEncodableValue;
2046 } else {
2047 ThisVal = Offset & MaxEncodableValue;
2048 }
2049 assert((ThisVal >> ShiftSize) <= MaxEncoding &&
2050 "Encoding cannot handle value that big");
2051 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2052 .addReg(SrcReg)
2053 .addImm(ThisVal >> ShiftSize)
2054 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
2055 .setMIFlag(Flag);
2056
2057 SrcReg = DestReg;
2058 Offset -= ThisVal;
2059 if (Offset == 0)
2060 return;
2061 }
2062 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2063 .addReg(SrcReg)
2064 .addImm(Offset)
2065 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2066 .setMIFlag(Flag);
2067}
2068
Benjamin Kramerf1362f62015-02-28 12:04:00 +00002069MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
2070 MachineInstr *MI,
2071 ArrayRef<unsigned> Ops,
2072 int FrameIndex) const {
Tim Northover3b0846e2014-05-24 12:50:23 +00002073 // This is a bit of a hack. Consider this instruction:
2074 //
2075 // %vreg0<def> = COPY %SP; GPR64all:%vreg0
2076 //
2077 // We explicitly chose GPR64all for the virtual register so such a copy might
2078 // be eliminated by RegisterCoalescer. However, that may not be possible, and
2079 // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
2080 // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
2081 //
2082 // To prevent that, we are going to constrain the %vreg0 register class here.
2083 //
2084 // <rdar://problem/11522048>
2085 //
2086 if (MI->isCopy()) {
2087 unsigned DstReg = MI->getOperand(0).getReg();
2088 unsigned SrcReg = MI->getOperand(1).getReg();
2089 if (SrcReg == AArch64::SP &&
2090 TargetRegisterInfo::isVirtualRegister(DstReg)) {
2091 MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
2092 return nullptr;
2093 }
2094 if (DstReg == AArch64::SP &&
2095 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
2096 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2097 return nullptr;
2098 }
2099 }
2100
2101 // Cannot fold.
2102 return nullptr;
2103}
2104
2105int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
2106 bool *OutUseUnscaledOp,
2107 unsigned *OutUnscaledOp,
2108 int *EmittableOffset) {
2109 int Scale = 1;
2110 bool IsSigned = false;
2111 // The ImmIdx should be changed case by case if it is not 2.
2112 unsigned ImmIdx = 2;
2113 unsigned UnscaledOp = 0;
2114 // Set output values in case of early exit.
2115 if (EmittableOffset)
2116 *EmittableOffset = 0;
2117 if (OutUseUnscaledOp)
2118 *OutUseUnscaledOp = false;
2119 if (OutUnscaledOp)
2120 *OutUnscaledOp = 0;
2121 switch (MI.getOpcode()) {
2122 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002123 llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
Tim Northover3b0846e2014-05-24 12:50:23 +00002124 // Vector spills/fills can't take an immediate offset.
2125 case AArch64::LD1Twov2d:
2126 case AArch64::LD1Threev2d:
2127 case AArch64::LD1Fourv2d:
2128 case AArch64::LD1Twov1d:
2129 case AArch64::LD1Threev1d:
2130 case AArch64::LD1Fourv1d:
2131 case AArch64::ST1Twov2d:
2132 case AArch64::ST1Threev2d:
2133 case AArch64::ST1Fourv2d:
2134 case AArch64::ST1Twov1d:
2135 case AArch64::ST1Threev1d:
2136 case AArch64::ST1Fourv1d:
2137 return AArch64FrameOffsetCannotUpdate;
2138 case AArch64::PRFMui:
2139 Scale = 8;
2140 UnscaledOp = AArch64::PRFUMi;
2141 break;
2142 case AArch64::LDRXui:
2143 Scale = 8;
2144 UnscaledOp = AArch64::LDURXi;
2145 break;
2146 case AArch64::LDRWui:
2147 Scale = 4;
2148 UnscaledOp = AArch64::LDURWi;
2149 break;
2150 case AArch64::LDRBui:
2151 Scale = 1;
2152 UnscaledOp = AArch64::LDURBi;
2153 break;
2154 case AArch64::LDRHui:
2155 Scale = 2;
2156 UnscaledOp = AArch64::LDURHi;
2157 break;
2158 case AArch64::LDRSui:
2159 Scale = 4;
2160 UnscaledOp = AArch64::LDURSi;
2161 break;
2162 case AArch64::LDRDui:
2163 Scale = 8;
2164 UnscaledOp = AArch64::LDURDi;
2165 break;
2166 case AArch64::LDRQui:
2167 Scale = 16;
2168 UnscaledOp = AArch64::LDURQi;
2169 break;
2170 case AArch64::LDRBBui:
2171 Scale = 1;
2172 UnscaledOp = AArch64::LDURBBi;
2173 break;
2174 case AArch64::LDRHHui:
2175 Scale = 2;
2176 UnscaledOp = AArch64::LDURHHi;
2177 break;
2178 case AArch64::LDRSBXui:
2179 Scale = 1;
2180 UnscaledOp = AArch64::LDURSBXi;
2181 break;
2182 case AArch64::LDRSBWui:
2183 Scale = 1;
2184 UnscaledOp = AArch64::LDURSBWi;
2185 break;
2186 case AArch64::LDRSHXui:
2187 Scale = 2;
2188 UnscaledOp = AArch64::LDURSHXi;
2189 break;
2190 case AArch64::LDRSHWui:
2191 Scale = 2;
2192 UnscaledOp = AArch64::LDURSHWi;
2193 break;
2194 case AArch64::LDRSWui:
2195 Scale = 4;
2196 UnscaledOp = AArch64::LDURSWi;
2197 break;
2198
2199 case AArch64::STRXui:
2200 Scale = 8;
2201 UnscaledOp = AArch64::STURXi;
2202 break;
2203 case AArch64::STRWui:
2204 Scale = 4;
2205 UnscaledOp = AArch64::STURWi;
2206 break;
2207 case AArch64::STRBui:
2208 Scale = 1;
2209 UnscaledOp = AArch64::STURBi;
2210 break;
2211 case AArch64::STRHui:
2212 Scale = 2;
2213 UnscaledOp = AArch64::STURHi;
2214 break;
2215 case AArch64::STRSui:
2216 Scale = 4;
2217 UnscaledOp = AArch64::STURSi;
2218 break;
2219 case AArch64::STRDui:
2220 Scale = 8;
2221 UnscaledOp = AArch64::STURDi;
2222 break;
2223 case AArch64::STRQui:
2224 Scale = 16;
2225 UnscaledOp = AArch64::STURQi;
2226 break;
2227 case AArch64::STRBBui:
2228 Scale = 1;
2229 UnscaledOp = AArch64::STURBBi;
2230 break;
2231 case AArch64::STRHHui:
2232 Scale = 2;
2233 UnscaledOp = AArch64::STURHHi;
2234 break;
2235
2236 case AArch64::LDPXi:
2237 case AArch64::LDPDi:
2238 case AArch64::STPXi:
2239 case AArch64::STPDi:
2240 IsSigned = true;
2241 Scale = 8;
2242 break;
2243 case AArch64::LDPQi:
2244 case AArch64::STPQi:
2245 IsSigned = true;
2246 Scale = 16;
2247 break;
2248 case AArch64::LDPWi:
2249 case AArch64::LDPSi:
2250 case AArch64::STPWi:
2251 case AArch64::STPSi:
2252 IsSigned = true;
2253 Scale = 4;
2254 break;
2255
2256 case AArch64::LDURXi:
2257 case AArch64::LDURWi:
2258 case AArch64::LDURBi:
2259 case AArch64::LDURHi:
2260 case AArch64::LDURSi:
2261 case AArch64::LDURDi:
2262 case AArch64::LDURQi:
2263 case AArch64::LDURHHi:
2264 case AArch64::LDURBBi:
2265 case AArch64::LDURSBXi:
2266 case AArch64::LDURSBWi:
2267 case AArch64::LDURSHXi:
2268 case AArch64::LDURSHWi:
2269 case AArch64::LDURSWi:
2270 case AArch64::STURXi:
2271 case AArch64::STURWi:
2272 case AArch64::STURBi:
2273 case AArch64::STURHi:
2274 case AArch64::STURSi:
2275 case AArch64::STURDi:
2276 case AArch64::STURQi:
2277 case AArch64::STURBBi:
2278 case AArch64::STURHHi:
2279 Scale = 1;
2280 break;
2281 }
2282
2283 Offset += MI.getOperand(ImmIdx).getImm() * Scale;
2284
2285 bool useUnscaledOp = false;
2286 // If the offset doesn't match the scale, we rewrite the instruction to
2287 // use the unscaled instruction instead. Likewise, if we have a negative
2288 // offset (and have an unscaled op to use).
2289 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
2290 useUnscaledOp = true;
2291
2292 // Use an unscaled addressing mode if the instruction has a negative offset
2293 // (or if the instruction is already using an unscaled addressing mode).
2294 unsigned MaskBits;
2295 if (IsSigned) {
2296 // ldp/stp instructions.
2297 MaskBits = 7;
2298 Offset /= Scale;
2299 } else if (UnscaledOp == 0 || useUnscaledOp) {
2300 MaskBits = 9;
2301 IsSigned = true;
2302 Scale = 1;
2303 } else {
2304 MaskBits = 12;
2305 IsSigned = false;
2306 Offset /= Scale;
2307 }
2308
2309 // Attempt to fold address computation.
2310 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
2311 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
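// In (possibly scaled) units: unsigned forms accept [0, 2^MaskBits - 1];
// signed forms (ldp/stp and the unscaled ldur/stur family) accept
// [-2^(MaskBits-1), 2^(MaskBits-1) - 1].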
2312 if (Offset >= MinOff && Offset <= MaxOff) {
2313 if (EmittableOffset)
2314 *EmittableOffset = Offset;
2315 Offset = 0;
2316 } else {
2317 int NewOff = Offset < 0 ? MinOff : MaxOff;
2318 if (EmittableOffset)
2319 *EmittableOffset = NewOff;
2320 Offset = (Offset - NewOff) * Scale;
2321 }
2322 if (OutUseUnscaledOp)
2323 *OutUseUnscaledOp = useUnscaledOp;
2324 if (OutUnscaledOp)
2325 *OutUnscaledOp = UnscaledOp;
2326 return AArch64FrameOffsetCanUpdate |
2327 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
2328}
2329
2330bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2331 unsigned FrameReg, int &Offset,
2332 const AArch64InstrInfo *TII) {
2333 unsigned Opcode = MI.getOpcode();
2334 unsigned ImmIdx = FrameRegIdx + 1;
2335
2336 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
2337 Offset += MI.getOperand(ImmIdx).getImm();
2338 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
2339 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
2340 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
2341 MI.eraseFromParent();
2342 Offset = 0;
2343 return true;
2344 }
2345
2346 int NewOffset;
2347 unsigned UnscaledOp;
2348 bool UseUnscaledOp;
2349 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
2350 &UnscaledOp, &NewOffset);
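// CanUpdate means the immediate operand can be rewritten in place; IsLegal
// additionally means the whole offset was folded, so the frame index can
// simply be replaced by FrameReg.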
2351 if (Status & AArch64FrameOffsetCanUpdate) {
2352 if (Status & AArch64FrameOffsetIsLegal)
2353 // Replace the FrameIndex with FrameReg.
2354 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2355 if (UseUnscaledOp)
2356 MI.setDesc(TII->get(UnscaledOp));
2357
2358 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
2359 return Offset == 0;
2360 }
2361
2362 return false;
2363}
2364
2365void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
2366 NopInst.setOpcode(AArch64::HINT);
Jim Grosbache9119e42015-05-13 18:37:00 +00002367 NopInst.addOperand(MCOperand::createImm(0));
Tim Northover3b0846e2014-05-24 12:50:23 +00002368}
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002369/// useMachineCombiner - return true when a target supports MachineCombiner
Benjamin Kramer8c90fd72014-09-03 11:41:21 +00002370bool AArch64InstrInfo::useMachineCombiner() const {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002371 // AArch64 supports the combiner
2372 return true;
2373}
2374//
2375// True when Opc sets the NZCV flags
2376static bool isCombineInstrSettingFlag(unsigned Opc) {
2377 switch (Opc) {
2378 case AArch64::ADDSWrr:
2379 case AArch64::ADDSWri:
2380 case AArch64::ADDSXrr:
2381 case AArch64::ADDSXri:
2382 case AArch64::SUBSWrr:
2383 case AArch64::SUBSXrr:
2384 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2385 case AArch64::SUBSWri:
2386 case AArch64::SUBSXri:
2387 return true;
2388 default:
2389 break;
2390 }
2391 return false;
2392}
2393//
2394// 32b Opcodes that can be combined with a MUL
2395static bool isCombineInstrCandidate32(unsigned Opc) {
2396 switch (Opc) {
2397 case AArch64::ADDWrr:
2398 case AArch64::ADDWri:
2399 case AArch64::SUBWrr:
2400 case AArch64::ADDSWrr:
2401 case AArch64::ADDSWri:
2402 case AArch64::SUBSWrr:
2403 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2404 case AArch64::SUBWri:
2405 case AArch64::SUBSWri:
2406 return true;
2407 default:
2408 break;
2409 }
2410 return false;
2411}
2412//
2413// 64b Opcodes that can be combined with a MUL
2414static bool isCombineInstrCandidate64(unsigned Opc) {
2415 switch (Opc) {
2416 case AArch64::ADDXrr:
2417 case AArch64::ADDXri:
2418 case AArch64::SUBXrr:
2419 case AArch64::ADDSXrr:
2420 case AArch64::ADDSXri:
2421 case AArch64::SUBSXrr:
2422 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2423 case AArch64::SUBXri:
2424 case AArch64::SUBSXri:
2425 return true;
2426 default:
2427 break;
2428 }
2429 return false;
2430}
2431//
2432// Opcodes that can be combined with a MUL
2433static bool isCombineInstrCandidate(unsigned Opc) {
2434 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
2435}
2436
2437static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
2438 unsigned MulOpc, unsigned ZeroReg) {
2439 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2440 MachineInstr *MI = nullptr;
2441 // We need a virtual register definition.
2442 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
2443 MI = MRI.getUniqueVRegDef(MO.getReg());
2444 // And it needs to be in the trace (otherwise, it won't have a depth).
2445 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
2446 return false;
2447
2448 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
2449 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
2450 MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
2451
2452 // The third input reg must be zero.
2453 if (MI->getOperand(3).getReg() != ZeroReg)
2454 return false;
2455
2456 // Must only be used by the user we combine with.
Gerolf Hoflehnerfe2c11f2014-08-13 22:07:36 +00002457 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002458 return false;
2459
2460 return true;
2461}
2462
2463/// hasPattern - return true when there is potentially a faster code sequence
2464/// for an instruction chain ending in \p Root. All potential patterns are
2465/// listed in the \p Pattern vector. Pattern should be sorted in priority
2466/// order since the pattern evaluator stops checking as soon as it finds a
2467/// faster sequence.
2468
2469bool AArch64InstrInfo::hasPattern(
2470 MachineInstr &Root,
2471 SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
2472 unsigned Opc = Root.getOpcode();
2473 MachineBasicBlock &MBB = *Root.getParent();
2474 bool Found = false;
2475
2476 if (!isCombineInstrCandidate(Opc))
2477 return false;
2478 if (isCombineInstrSettingFlag(Opc)) {
2479 int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
2480 // When NZCV is live bail out.
2481 if (Cmp_NZCV == -1)
2482 return false;
2483 unsigned NewOpc = convertFlagSettingOpcode(&Root);
2484 // When opcode can't change bail out.
2485 // CHECKME: do we miss any cases for opcode conversion?
2486 if (NewOpc == Opc)
2487 return false;
2488 Opc = NewOpc;
2489 }
2490
2491 switch (Opc) {
2492 default:
2493 break;
2494 case AArch64::ADDWrr:
2495 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
2496 "ADDWrr does not have register operands");
2497 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2498 AArch64::WZR)) {
2499 Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
2500 Found = true;
2501 }
2502 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
2503 AArch64::WZR)) {
2504 Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
2505 Found = true;
2506 }
2507 break;
2508 case AArch64::ADDXrr:
2509 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2510 AArch64::XZR)) {
2511 Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
2512 Found = true;
2513 }
2514 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
2515 AArch64::XZR)) {
2516 Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
2517 Found = true;
2518 }
2519 break;
2520 case AArch64::SUBWrr:
2521 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2522 AArch64::WZR)) {
2523 Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
2524 Found = true;
2525 }
2526 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
2527 AArch64::WZR)) {
2528 Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
2529 Found = true;
2530 }
2531 break;
2532 case AArch64::SUBXrr:
2533 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2534 AArch64::XZR)) {
2535 Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
2536 Found = true;
2537 }
2538 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
2539 AArch64::XZR)) {
2540 Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
2541 Found = true;
2542 }
2543 break;
2544 case AArch64::ADDWri:
2545 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2546 AArch64::WZR)) {
2547 Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
2548 Found = true;
2549 }
2550 break;
2551 case AArch64::ADDXri:
2552 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2553 AArch64::XZR)) {
2554 Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
2555 Found = true;
2556 }
2557 break;
2558 case AArch64::SUBWri:
2559 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
2560 AArch64::WZR)) {
2561 Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
2562 Found = true;
2563 }
2564 break;
2565 case AArch64::SUBXri:
2566 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
2567 AArch64::XZR)) {
2568 Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
2569 Found = true;
2570 }
2571 break;
2572 }
2573 return Found;
2574}
2575
2576/// genMadd - Generate madd instruction and combine mul and add.
2577/// Example:
2578/// MUL I=A,B,0
2579/// ADD R,I,C
2580/// ==> MADD R,A,B,C
2581/// \param Root is the ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00002582/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002583/// contain the generated madd instruction
2584/// \param IdxMulOpd is the index of the operand in Root that is the result
2585/// of the MUL. In the example above IdxMulOpd is 1.
2586/// \param MaddOpc the opcode of the madd instruction
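/// \param RC the register class used to constrain the result and source
/// registers of the generated madd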
2587static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
2588 const TargetInstrInfo *TII, MachineInstr &Root,
2589 SmallVectorImpl<MachineInstr *> &InsInstrs,
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002590 unsigned IdxMulOpd, unsigned MaddOpc,
2591 const TargetRegisterClass *RC) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002592 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
2593
2594 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
2595 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002596 unsigned ResultReg = Root.getOperand(0).getReg();
2597 unsigned SrcReg0 = MUL->getOperand(1).getReg();
2598 bool Src0IsKill = MUL->getOperand(1).isKill();
2599 unsigned SrcReg1 = MUL->getOperand(2).getReg();
2600 bool Src1IsKill = MUL->getOperand(2).isKill();
2601 unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
2602 bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
2603
2604 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
2605 MRI.constrainRegClass(ResultReg, RC);
2606 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
2607 MRI.constrainRegClass(SrcReg0, RC);
2608 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
2609 MRI.constrainRegClass(SrcReg1, RC);
2610 if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
2611 MRI.constrainRegClass(SrcReg2, RC);
2612
2613 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
2614 ResultReg)
2615 .addReg(SrcReg0, getKillRegState(Src0IsKill))
2616 .addReg(SrcReg1, getKillRegState(Src1IsKill))
2617 .addReg(SrcReg2, getKillRegState(Src2IsKill));
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002618 // Insert the MADD
2619 InsInstrs.push_back(MIB);
2620 return MUL;
2621}
2622
2623/// genMaddR - Generate madd instruction and combine mul and add using
2624/// an extra virtual register
2625/// Example - an ADD intermediate needs to be stored in a register:
2626/// MUL I=A,B,0
2627/// ADD R,I,Imm
2628/// ==> ORR V, ZR, Imm
2629/// ==> MADD R,A,B,V
2630/// \param Root is the ADD instruction
NAKAMURA Takumi40da2672014-08-08 02:04:18 +00002631/// \param [out] InsInstrs is a vector of machine instructions and will
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002632/// contain the generated madd instruction
2633/// \param IdxMulOpd is the index of the operand in Root that is the result
2634/// of the MUL. In the example above IdxMulOpd is 1.
2635/// \param MaddOpc the opcode of the madd instruction
2636/// \param VR is a virtual register that holds the value of an ADD operand
2637/// (V in the example above).
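/// \param RC the register class used to constrain the result and source
/// registers (including \p VR)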
2638static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
2639 const TargetInstrInfo *TII, MachineInstr &Root,
2640 SmallVectorImpl<MachineInstr *> &InsInstrs,
2641 unsigned IdxMulOpd, unsigned MaddOpc,
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002642 unsigned VR, const TargetRegisterClass *RC) {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002643 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
2644
2645 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002646 unsigned ResultReg = Root.getOperand(0).getReg();
2647 unsigned SrcReg0 = MUL->getOperand(1).getReg();
2648 bool Src0IsKill = MUL->getOperand(1).isKill();
2649 unsigned SrcReg1 = MUL->getOperand(2).getReg();
2650 bool Src1IsKill = MUL->getOperand(2).isKill();
2651
2652 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
2653 MRI.constrainRegClass(ResultReg, RC);
2654 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
2655 MRI.constrainRegClass(SrcReg0, RC);
2656 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
2657 MRI.constrainRegClass(SrcReg1, RC);
2658 if (TargetRegisterInfo::isVirtualRegister(VR))
2659 MRI.constrainRegClass(VR, RC);
2660
2661 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
2662 ResultReg)
2663 .addReg(SrcReg0, getKillRegState(Src0IsKill))
2664 .addReg(SrcReg1, getKillRegState(Src1IsKill))
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002665 .addReg(VR);
2666 // Insert the MADD
2667 InsInstrs.push_back(MIB);
2668 return MUL;
2669}
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002670
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002671/// genAlternativeCodeSequence - when hasPattern() finds a pattern,
2672/// this function generates the instructions that could replace the
2673/// original code sequence.
2674void AArch64InstrInfo::genAlternativeCodeSequence(
2675 MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
2676 SmallVectorImpl<MachineInstr *> &InsInstrs,
2677 SmallVectorImpl<MachineInstr *> &DelInstrs,
2678 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
2679 MachineBasicBlock &MBB = *Root.getParent();
2680 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2681 MachineFunction &MF = *MBB.getParent();
Eric Christophere0818912014-09-03 20:36:26 +00002682 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002683
2684 MachineInstr *MUL;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002685 const TargetRegisterClass *RC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002686 unsigned Opc;
2687 switch (Pattern) {
2688 default:
2689 // signal error.
2690 break;
2691 case MachineCombinerPattern::MC_MULADDW_OP1:
2692 case MachineCombinerPattern::MC_MULADDX_OP1:
2693 // MUL I=A,B,0
2694 // ADD R,I,C
2695 // ==> MADD R,A,B,C
2696 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002697 if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
2698 Opc = AArch64::MADDWrrr;
2699 RC = &AArch64::GPR32RegClass;
2700 } else {
2701 Opc = AArch64::MADDXrrr;
2702 RC = &AArch64::GPR64RegClass;
2703 }
2704 MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002705 break;
2706 case MachineCombinerPattern::MC_MULADDW_OP2:
2707 case MachineCombinerPattern::MC_MULADDX_OP2:
2708 // MUL I=A,B,0
2709 // ADD R,C,I
2710 // ==> MADD R,A,B,C
2711 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002712 if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
2713 Opc = AArch64::MADDWrrr;
2714 RC = &AArch64::GPR32RegClass;
2715 } else {
2716 Opc = AArch64::MADDXrrr;
2717 RC = &AArch64::GPR64RegClass;
2718 }
2719 MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002720 break;
2721 case MachineCombinerPattern::MC_MULADDWI_OP1:
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002722 case MachineCombinerPattern::MC_MULADDXI_OP1: {
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002723 // MUL I=A,B,0
2724 // ADD R,I,Imm
2725 // ==> ORR V, ZR, Imm
2726 // ==> MADD R,A,B,V
2727 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002728 const TargetRegisterClass *OrrRC;
2729 unsigned BitSize, OrrOpc, ZeroReg;
2730 if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
2731 OrrOpc = AArch64::ORRWri;
2732 OrrRC = &AArch64::GPR32spRegClass;
2733 BitSize = 32;
2734 ZeroReg = AArch64::WZR;
2735 Opc = AArch64::MADDWrrr;
2736 RC = &AArch64::GPR32RegClass;
2737 } else {
2738 OrrOpc = AArch64::ORRXri;
2739 OrrRC = &AArch64::GPR64spRegClass;
2740 BitSize = 64;
2741 ZeroReg = AArch64::XZR;
2742 Opc = AArch64::MADDXrrr;
2743 RC = &AArch64::GPR64RegClass;
2744 }
2745 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
2746 uint64_t Imm = Root.getOperand(2).getImm();
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002747
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002748 if (Root.getOperand(3).isImm()) {
2749 unsigned Val = Root.getOperand(3).getImm();
2750 Imm = Imm << Val;
2751 }
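// Truncate the immediate to BitSize bits (clearing the upper bits) before
// checking whether it is encodable as a logical immediate.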
2752 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
2753 uint64_t Encoding;
2754 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
2755 MachineInstrBuilder MIB1 =
2756 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
2757 .addReg(ZeroReg)
2758 .addImm(Encoding);
2759 InsInstrs.push_back(MIB1);
2760 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
2761 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002762 }
2763 break;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002764 }
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002765 case MachineCombinerPattern::MC_MULSUBW_OP1:
2766 case MachineCombinerPattern::MC_MULSUBX_OP1: {
2767 // MUL I=A,B,0
2768 // SUB R,I, C
2769 // ==> SUB V, 0, C
2770 // ==> MADD R,A,B,V // = -C + A*B
2771 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002772 const TargetRegisterClass *SubRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002773 unsigned SubOpc, ZeroReg;
2774 if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
2775 SubOpc = AArch64::SUBWrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002776 SubRC = &AArch64::GPR32spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002777 ZeroReg = AArch64::WZR;
2778 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002779 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002780 } else {
2781 SubOpc = AArch64::SUBXrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002782 SubRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002783 ZeroReg = AArch64::XZR;
2784 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002785 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002786 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002787 unsigned NewVR = MRI.createVirtualRegister(SubRC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002788 // SUB NewVR, 0, C
2789 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002790 BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002791 .addReg(ZeroReg)
2792 .addOperand(Root.getOperand(2));
2793 InsInstrs.push_back(MIB1);
2794 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002795 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
2796 break;
2797 }
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002798 case MachineCombinerPattern::MC_MULSUBW_OP2:
2799 case MachineCombinerPattern::MC_MULSUBX_OP2:
2800 // MUL I=A,B,0
2801 // SUB R,C,I
2802 // ==> MSUB R,A,B,C (computes C - A*B)
2803 // --- Create(MSUB);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002804 if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
2805 Opc = AArch64::MSUBWrrr;
2806 RC = &AArch64::GPR32RegClass;
2807 } else {
2808 Opc = AArch64::MSUBXrrr;
2809 RC = &AArch64::GPR64RegClass;
2810 }
2811 MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002812 break;
2813 case MachineCombinerPattern::MC_MULSUBWI_OP1:
2814 case MachineCombinerPattern::MC_MULSUBXI_OP1: {
2815 // MUL I=A,B,0
2816 // SUB R,I, Imm
2817 // ==> ORR V, ZR, -Imm
2818 // ==> MADD R,A,B,V // = -Imm + A*B
2819 // --- Create(MADD);
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002820 const TargetRegisterClass *OrrRC;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002821 unsigned BitSize, OrrOpc, ZeroReg;
2822 if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
Juergen Ributzka25816b02014-08-30 06:16:26 +00002823 OrrOpc = AArch64::ORRWri;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002824 OrrRC = &AArch64::GPR32spRegClass;
2825 BitSize = 32;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002826 ZeroReg = AArch64::WZR;
2827 Opc = AArch64::MADDWrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002828 RC = &AArch64::GPR32RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002829 } else {
2830 OrrOpc = AArch64::ORRXri;
Juergen Ributzkaf9660f02014-11-04 22:20:07 +00002831 OrrRC = &AArch64::GPR64spRegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002832 BitSize = 64;
2833 ZeroReg = AArch64::XZR;
2834 Opc = AArch64::MADDXrrr;
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002835 RC = &AArch64::GPR64RegClass;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002836 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002837 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002838 int Imm = Root.getOperand(2).getImm();
2839 if (Root.getOperand(3).isImm()) {
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002840 unsigned Val = Root.getOperand(3).getImm();
2841 Imm = Imm << Val;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002842 }
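// Negate the immediate and truncate it to BitSize bits before checking
// whether it is encodable as a logical immediate.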
2843 uint64_t UImm = ((uint64_t)-Imm) << (64 - BitSize) >> (64 - BitSize);
2844 uint64_t Encoding;
2845 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
2846 MachineInstrBuilder MIB1 =
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002847 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002848 .addReg(ZeroReg)
2849 .addImm(Encoding);
2850 InsInstrs.push_back(MIB1);
2851 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002852 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002853 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002854 break;
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002855 }
Juergen Ributzka31e5b7f2014-09-03 07:07:10 +00002856 } // end switch (Pattern)
Gerolf Hoflehner97c383b2014-08-07 21:40:58 +00002857 // Record MUL and ADD/SUB for deletion
2858 DelInstrs.push_back(MUL);
2859 DelInstrs.push_back(&Root);
2860
2861 return;
2862}
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002863
2864/// \brief Replace csincr-branch sequence by simple conditional branch
2865///
2866/// Examples:
2867/// 1.
2868/// csinc w9, wzr, wzr, <condition code>
2869/// tbnz w9, #0, 0x44
2870/// to
2871/// b.<inverted condition code>
2872///
2873/// 2.
2874/// csinc w9, wzr, wzr, <condition code>
2875/// tbz w9, #0, 0x44
2876/// to
2877/// b.<condition code>
2878///
2879/// \param MI Conditional Branch
2880/// \return True when the simple conditional branch is generated
2881///
2882bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
2883 bool IsNegativeBranch = false;
2884 bool IsTestAndBranch = false;
2885 unsigned TargetBBInMI = 0;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002886 switch (MI->getOpcode()) {
2887 default:
2888 llvm_unreachable("Unknown branch instruction?");
2889 case AArch64::Bcc:
2890 return false;
2891 case AArch64::CBZW:
2892 case AArch64::CBZX:
2893 TargetBBInMI = 1;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002894 break;
2895 case AArch64::CBNZW:
2896 case AArch64::CBNZX:
2897 TargetBBInMI = 1;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002898 IsNegativeBranch = true;
2899 break;
2900 case AArch64::TBZW:
2901 case AArch64::TBZX:
2902 TargetBBInMI = 2;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002903 IsTestAndBranch = true;
2904 break;
2905 case AArch64::TBNZW:
2906 case AArch64::TBNZX:
2907 TargetBBInMI = 2;
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002908 IsNegativeBranch = true;
2909 IsTestAndBranch = true;
2910 break;
2911 }
2912 // So we increment a zero register and test for bits other
2913 // than bit 0? Conservatively bail out in case the verifier
2914 // missed this case.
2915 if (IsTestAndBranch && MI->getOperand(1).getImm())
2916 return false;
2917
2918 // Find Definition.
2919 assert(MI->getParent() && "Incomplete machine instruction\n");
2920 MachineBasicBlock *MBB = MI->getParent();
2921 MachineFunction *MF = MBB->getParent();
2922 MachineRegisterInfo *MRI = &MF->getRegInfo();
2923 unsigned VReg = MI->getOperand(0).getReg();
2924 if (!TargetRegisterInfo::isVirtualRegister(VReg))
2925 return false;
2926
2927 MachineInstr *DefMI = MRI->getVRegDef(VReg);
2928
2929 // Look for CSINC
2930 if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
2931 DefMI->getOperand(1).getReg() == AArch64::WZR &&
2932 DefMI->getOperand(2).getReg() == AArch64::WZR) &&
2933 !(DefMI->getOpcode() == AArch64::CSINCXr &&
2934 DefMI->getOperand(1).getReg() == AArch64::XZR &&
2935 DefMI->getOperand(2).getReg() == AArch64::XZR))
2936 return false;
2937
2938 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
2939 return false;
2940
2941 AArch64CC::CondCode CC =
Gerolf Hoflehner5d26d402014-10-14 23:55:00 +00002942 (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002943 bool CheckOnlyCCWrites = true;
2944 // Convert only when the condition code is not modified between
2945 // the CSINC and the branch. The CC may be used by other
2946 // instructions in between.
Eric Christophera0de2532015-03-18 20:37:30 +00002947 if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
Gerolf Hoflehnera4c96d02014-10-14 23:07:53 +00002948 return false;
2949 MachineBasicBlock &RefToMBB = *MBB;
2950 MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
2951 DebugLoc DL = MI->getDebugLoc();
2952 if (IsNegativeBranch)
2953 CC = AArch64CC::getInvertedCondCode(CC);
2954 BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
2955 MI->eraseFromParent();
2956 return true;
2957}