//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}
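
// For reference, the Cond encodings built above (and consumed by
// ReverseBranchCondition, instantiateCondBranch and insertSelect below) are:
//   b.<cc> <target>          -> Cond = { <cc> }
//   cbz/cbnz <reg>, <target> -> Cond = { -1, <opcode>, <reg> }
//   tbz/tbnz <reg>, #<bit>   -> Cond = { -1, <opcode>, <reg>, <bit> }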

// Branch analysis.
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now that the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}
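
// E.g. (sketch) inserting a two-way conditional branch on a Bcc-style
// condition emits:
//   b.<cc> <TBB>
//   b      <FBB>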

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // Fall through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // Fall through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
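
// Examples of the folds recognized above (sketch): when the selected value is
// defined by one of these patterns, the csel can absorb the defining
// instruction:
//   add w1, w0, #1   -> csinc dst, <other>, w0, <cc>   ; cc ? other : w0 + 1
//   orn w1, wzr, w0  -> csinv dst, <other>, w0, <cc>   ; cc ? other : ~w0
//   sub w1, wzr, w0  -> csneg dst, <other>, w0, <cc>   ; cc ? other : -w0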

bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    const SmallVectorImpl<MachineOperand> &Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // tst reg, #(1 << foo) is actually ands xzr, reg, #(1 << foo).
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}
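
// E.g. (sketch) a select on a tbz/tbnz-style condition becomes a test plus a
// conditional select:
//   ands wzr, w1, #(1 << bit)       ; tst
//   csel w0, wTrue, wFalse, eq/ne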
543
Jiangning Liucd296372014-07-29 02:09:26 +0000544// FIXME: this implementation should be micro-architecture dependent, so a
545// micro-architecture target hook should be introduced here in future.
546bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
547 if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
548 return MI->isAsCheapAsAMove();
549
550 switch (MI->getOpcode()) {
551 default:
552 return false;
553
554 // add/sub on register without shift
555 case AArch64::ADDWri:
556 case AArch64::ADDXri:
557 case AArch64::SUBWri:
558 case AArch64::SUBXri:
559 return (MI->getOperand(3).getImm() == 0);
560
561 // logical ops on immediate
562 case AArch64::ANDWri:
563 case AArch64::ANDXri:
564 case AArch64::EORWri:
565 case AArch64::EORXri:
566 case AArch64::ORRWri:
567 case AArch64::ORRXri:
568 return true;
569
570 // logical ops on register without shift
571 case AArch64::ANDWrr:
572 case AArch64::ANDXrr:
573 case AArch64::BICWrr:
574 case AArch64::BICXrr:
575 case AArch64::EONWrr:
576 case AArch64::EONXrr:
577 case AArch64::EORWrr:
578 case AArch64::EORXrr:
579 case AArch64::ORNWrr:
580 case AArch64::ORNXrr:
581 case AArch64::ORRWrr:
582 case AArch64::ORRXrr:
583 return true;
584 }
585
586 llvm_unreachable("Unknown opcode to check as cheap as a move!");
587}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(2).getImm();
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = AArch64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(),
        MI->getOpcode() == AArch64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}

static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getSubtargetImpl()->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    unsigned NewOpc;
    switch (CmpInstr->getOpcode()) {
    default:
      return false;
    case AArch64::ADDSWrr: NewOpc = AArch64::ADDWrr; break;
    case AArch64::ADDSWri: NewOpc = AArch64::ADDWri; break;
    case AArch64::ADDSWrs: NewOpc = AArch64::ADDWrs; break;
    case AArch64::ADDSWrx: NewOpc = AArch64::ADDWrx; break;
    case AArch64::ADDSXrr: NewOpc = AArch64::ADDXrr; break;
    case AArch64::ADDSXri: NewOpc = AArch64::ADDXri; break;
    case AArch64::ADDSXrs: NewOpc = AArch64::ADDXrs; break;
    case AArch64::ADDSXrx: NewOpc = AArch64::ADDXrx; break;
    case AArch64::SUBSWrr: NewOpc = AArch64::SUBWrr; break;
    case AArch64::SUBSWri: NewOpc = AArch64::SUBWri; break;
    case AArch64::SUBSWrs: NewOpc = AArch64::SUBWrs; break;
    case AArch64::SUBSWrx: NewOpc = AArch64::SUBWrx; break;
    case AArch64::SUBSXrr: NewOpc = AArch64::SUBXrr; break;
    case AArch64::SUBSXri: NewOpc = AArch64::SUBXri; break;
    case AArch64::SUBSXrs: NewOpc = AArch64::SUBXrs; break;
    case AArch64::SUBSXrx: NewOpc = AArch64::SUBXrx; break;
    }

    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands' reg classes are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where the immediate is zero.
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if the destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // We iterate backward, starting from the instruction before CmpInstr, and
  // stop when reaching the definition of the source register or the start of
  // the basic block, to check whether NZCV is used or modified in between.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, we can't optimize away the Compare.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that NZCV isn't set between the comparison instruction and the one
  // we want to change.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        Instr.readsRegister(AArch64::NZCV, TRI))
      // This instruction modifies or uses NZCV after the one we want to
      // change. We can't do this transformation.
      return false;
    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // When checking against MI: if the condition code requires checking of the
  // V bit, then this is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove the Compare instruction if Overflow (V) is
      // used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::LT:
      case AArch64CC::GT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands' reg classes are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}
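
// Illustrative example of the transformation above (sketch): the
// flag-setting definition already produces the Z bit the branch needs, so
// the redundant compare can be removed:
//   sub  w0, w1, w2              subs w0, w1, w2
//   cmp  w0, #0          --->    b.eq <bb>
//   b.eq <bb>
// This is only done when no user of NZCV depends on the V bit (e.g. b.ge,
// b.lt, b.vs), as checked above.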

bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3)
        .addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
        .addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
        .addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
        .addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}
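
// E.g. (sketch, assuming the guard variable is named __stack_chk_guard) the
// GOT case above expands LOAD_STACK_GUARD xN roughly to:
//   <LOADgot xN, :got:__stack_chk_guard>   ; itself later adrp + ldr
//   ldr xN, [xN]                           ; load the guard value itself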

/// Return true if this instruction has a shifted-register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended-register operand with a
/// non-zero extend/shift amount.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX:

    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

bool
AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                       unsigned &Offset,
                                       const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that operand 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}
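
// E.g. (sketch) two 8-byte loads off the same base with adjacent unit-scaled
// immediates 0 and 1 (byte offsets 0 and 8) are clustered:
//   ldr x0, [x2]
//   ldr x1, [x2, #8]   ; adjacent, so they may later form ldp x0, x1, [x2]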

bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != AArch64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::ANDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
  case AArch64::ANDSXri:
    return true;
  }
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                                         int FrameIx,
                                                         uint64_t Offset,
                                                         const MDNode *MDPtr,
                                                         DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(MDPtr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
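
// E.g. copying D1_D2 to D2_D3 sub-register by sub-register in the forward
// direction would overwrite D2 before it is read, so copyPhysRegTuple below
// copies the sub-registers in reverse order in that case.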

void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}
1634
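// Spill SrcReg to stack slot FI. The opcode is chosen from the spill size of
// the register class: GPR and FPR classes use plain scaled stores, while the
// multi-register NEON tuple classes fall back to ST1 (see the switch below).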
void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
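  // Note: the ST1 tuple stores selected below have no immediate-offset form,
  // so Offset is cleared for them and no #0 operand is appended afterwards.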
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

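// Reload DestReg from stack slot FI; mirrors storeRegToStackSlot, selecting
// LDR/LD1 variants by the spill size of the register class.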
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

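// Materialize DestReg = SrcReg +/- Offset with ADDXri/SUBXri (ADDSXri/SUBSXri
// when SetNZCV is set), splitting the offset into 12-bit chunks the immediate
// field can encode. A sketch of a typical call from frame lowering, assuming
// the default Flag/SetNZCV arguments declared in the header:
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
//                   MachineInstr::FrameSetup);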
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP, which is always 16-byte aligned, so the
  // scratch register can be loaded with offset%8 and the add/sub can use an
  // extending instruction with LSL#3.
  // Currently the function handles any offset but may generate a poor code
  // sequence.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
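  // Peel off what the 12-bit shifted-immediate form can encode, one chunk at
  // a time. A worked example (illustrative, not from the source): adding
  // 0x1234 emits
  //   ADD DestReg, SrcReg,  #1, lsl #12   ; covers 0x1000
  //   ADD DestReg, DestReg, #0x234        ; final add below the loop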
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}

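// Target hook called when the register allocator considers folding a frame
// index directly into MI. This implementation never folds; it only uses the
// opportunity to constrain the register class of problematic SP copies.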
MachineInstr *
AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}

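// Check whether Offset (plus MI's existing immediate) can be encoded in MI's
// addressing mode. Returns AArch64FrameOffsetCannotUpdate for instructions
// that cannot take an immediate at all, otherwise AArch64FrameOffsetCanUpdate,
// or'd with AArch64FrameOffsetIsLegal when the whole offset fits. The
// encodable portion is returned in *EmittableOffset and any remainder is left
// in Offset for the caller to materialize separately.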
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // ImmIdx must be adjusted per opcode if the immediate is not operand 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, rewrite the instruction to use the
  // unscaled form instead; likewise if the offset is negative and an unscaled
  // op is available.
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Pick the immediate width: ldp/stp take a signed 7-bit scaled immediate,
  // unscaled ops a signed 9-bit byte offset, and everything else an unsigned
  // 12-bit scaled immediate.
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
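  // Resulting ranges: 12-bit unsigned scaled -> [0, 4095] units; 9-bit signed
  // unscaled -> [-256, 255] bytes; 7-bit signed (ldp/stp) -> [-64, 63] units.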
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}

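// Rewrite the frame-index operand of MI to use FrameReg, folding as much of
// Offset into the immediate as the encoding allows. Returns true when the
// rewrite consumed the entire offset; otherwise the remainder is passed back
// through Offset for the caller (typically eliminateFrameIndex) to emit.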
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}

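// HINT #0 is the architectural encoding of NOP.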
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}