//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64InstrInfo : public AArch64GenInstrInfo {
  // Reserve bits in the MachineMemOperand target hint flags, starting at 1.
  // They will be shifted into MOTargetHintStart when accessed.
  enum TargetMemOperandFlags {
    MOSuppressPair = 1
  };

  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

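  /// Return the size of \p MI in bytes. Most AArch64 instructions occupy a
  /// fixed 4 bytes; inline assembly and certain pseudo-instructions are the
  /// exceptions.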
  unsigned GetInstSizeInBytes(const MachineInstr *MI) const;

  bool isAsCheapAsAMove(const MachineInstr *MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                             unsigned &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
                                  AliasAnalysis *AA = nullptr) const override;

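  /// If \p MI is a direct load from (store to) a stack slot, return the
  /// register loaded (stored) and set \p FrameIndex to the slot's frame index;
  /// otherwise return 0.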
  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr *MI,
                              int &FrameIndex) const override;

  /// Returns true if \p MI has a shiftable register operand and the shift
  /// amount is non-zero.
  bool hasShiftedReg(const MachineInstr *MI) const;

  /// Returns true if \p MI has an extendable register operand and the extend
  /// amount is non-zero.
  bool hasExtendedReg(const MachineInstr *MI) const;

  /// \brief Does this instruction set its full destination register to zero?
  bool isGPRZero(const MachineInstr *MI) const;

  /// \brief Does this instruction rename a GPR without modifying bits?
  bool isGPRCopy(const MachineInstr *MI) const;

  /// \brief Does this instruction rename an FPR without modifying bits?
  bool isFPRCopy(const MachineInstr *MI) const;

  /// Return true if this load/store scales or extends its register offset.
  /// This refers to scaling a dynamic index as opposed to scaled immediates.
  /// MI should be a memory op that allows scaled addressing.
  bool isScaledAddr(const MachineInstr *MI) const;

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  bool isLdStPairSuppressed(const MachineInstr *MI) const;

  /// Hint that pairing the given load or store is unprofitable; the hint is
  /// recorded as the MOSuppressPair flag on the instruction's memory operand.
  void suppressLdStPair(MachineInstr *MI) const;

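  /// If \p LdSt addresses memory as a simple base register plus immediate
  /// offset, return true and report both; the machine scheduler uses this
  /// together with shouldClusterLoads below to cluster nearby loads.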
  bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                            unsigned &Offset,
                            const TargetRegisterInfo *TRI) const override;

  bool getLdStBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
                                 int &Offset, int &Width,
                                 const TargetRegisterInfo *TRI) const;

  bool enableClusterLoads() const override { return true; }

  bool shouldClusterLoads(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt,
                          unsigned NumLoads) const override;

  bool shouldScheduleAdjacent(MachineInstr *First,
                              MachineInstr *Second) const override;

  MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
                                         uint64_t Offset, const MDNode *Var,
                                         const MDNode *Expr, DebugLoc DL) const;
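  /// Helper for copyPhysReg: copy a register tuple (e.g. a DD or QQ register
  /// sequence) by emitting \p Opcode once for each sub-register index listed
  /// in \p Indices.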
  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                        bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, unsigned SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, unsigned DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

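  /// Attempt to fold a load from or store to \p FrameIndex into \p MI at the
  /// operand indices given in \p Ops; return the folded instruction, or
  /// nullptr if folding is not possible.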
  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      int FrameIndex) const override;

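  // Branch analysis and select hooks from TargetInstrInfo, used by branch
  // folding and (early) if-conversion.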
  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB,
                        const SmallVectorImpl<MachineOperand> &Cond,
                        DebugLoc DL) const override;
  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &,
                       const SmallVectorImpl<MachineOperand> &Cond, unsigned,
                       unsigned, int &, int &, int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    DebugLoc DL, unsigned DstReg,
                    const SmallVectorImpl<MachineOperand> &Cond,
                    unsigned TrueReg, unsigned FalseReg) const override;
  void getNoopForMachoTarget(MCInst &NopInst) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr *MI) const override;
  /// hasPattern - Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in <Root>. All potential patterns are
  /// listed in the <Pattern> array.
  bool hasPattern(MachineInstr &Root,
                  SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern)
      const override;

  /// genAlternativeCodeSequence - When hasPattern() finds a pattern, this
  /// function generates the instructions that could replace the original code
  /// sequence: the new instructions are appended to InsInstrs and the
  /// instructions to be erased to DelInstrs.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// useMachineCombiner - AArch64 supports the MachineCombiner pass.
  bool useMachineCombiner() const override;

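  /// Expand target-specific pseudo-instructions that remain after register
  /// allocation into real machine instructions.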
  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
private:
  void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
                             MachineBasicBlock *TBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const;
};

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
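/// A typical call from frame lowering, adjusting SP by the frame size in the
/// prologue (illustrative sketch; NumBytes is a placeholder):
/// \code
///   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
///                   MachineInstr::FrameSetup);
/// \endcode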
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     DebugLoc DL, unsigned DestReg, unsigned SrcReg, int Offset,
                     const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false);

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, int &Offset,
                              const AArch64InstrInfo *TII);

/// \brief Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// \brief Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operation, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
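/// A rough caller pattern, as used by rewriteAArch64FrameIndex (illustrative
/// sketch; variable names are placeholders):
/// \code
///   bool UseUnscaledOp;
///   unsigned UnscaledOp;
///   int EmittableOffset;
///   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
///                                          &UnscaledOp, &EmittableOffset);
///   if (Status & AArch64FrameOffsetCanUpdate) {
///     // Fold EmittableOffset into MI (switching to UnscaledOp if requested);
///     // any remainder is left in Offset for the caller to materialize.
///   }
/// \endcode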
int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int *EmittableOffset = nullptr);

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  return Opc == AArch64::BR;
}

} // end namespace llvm

#endif