//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_AArch64INSTRINFO_H
#define LLVM_TARGET_AArch64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64InstrInfo : public AArch64GenInstrInfo {
  // Reserve bits in the MachineMemOperand target hint flags, starting at 1.
  // They will be shifted into MOTargetHintStart when accessed.
  enum TargetMemOperandFlags {
    MOSuppressPair = 1
  };
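
  // Illustrative note, not from the original header: the intent is that a flag
  // value such as MOSuppressPair occupies the reserved hint bits and is shifted
  // into the target-hint range when stored on a MachineMemOperand, conceptually:
  //   Flags |= MOSuppressPair << MOTargetHintStart; // hypothetical encoding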

  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegisterInfo. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned GetInstSizeInBytes(const MachineInstr *MI) const;

  bool isAsCheapAsAMove(const MachineInstr *MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                             unsigned &DstReg, unsigned &SubIdx) const override;

  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr *MI,
                              int &FrameIndex) const override;

  /// Returns true if \p MI has a shiftable register operand and the shift
  /// value is non-zero.
  bool hasShiftedReg(const MachineInstr *MI) const;

  /// Returns true if \p MI has an extendable register operand and the extend
  /// value is non-zero.
  bool hasExtendedReg(const MachineInstr *MI) const;

  /// \brief Does this instruction set its full destination register to zero?
  bool isGPRZero(const MachineInstr *MI) const;

  /// \brief Does this instruction rename a GPR without modifying bits?
  bool isGPRCopy(const MachineInstr *MI) const;

  /// \brief Does this instruction rename an FPR without modifying bits?
  bool isFPRCopy(const MachineInstr *MI) const;

  /// Return true if this load/store scales or extends its register offset.
  /// This refers to scaling a dynamic index as opposed to scaled immediates.
  /// MI should be a memory op that allows scaled addressing.
  bool isScaledAddr(const MachineInstr *MI) const;

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  bool isLdStPairSuppressed(const MachineInstr *MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  void suppressLdStPair(MachineInstr *MI) const;
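
  // Minimal usage sketch (illustrative, not part of the original header): a
  // pass that wants a load or store left unpaired can record the hint, and
  // later passes can query it.  TII is assumed to point at this instr info.
  //   if (!TII->isLdStPairSuppressed(MI))
  //     TII->suppressLdStPair(MI); // sets the MOSuppressPair hint on MI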

  bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                            unsigned &Offset,
                            const TargetRegisterInfo *TRI) const override;

  bool enableClusterLoads() const override { return true; }

  bool shouldClusterLoads(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt,
                          unsigned NumLoads) const override;

  bool shouldScheduleAdjacent(MachineInstr *First,
                              MachineInstr *Second) const override;

  MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
                                         uint64_t Offset, const MDNode *MDPtr,
                                         DebugLoc DL) const;
  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                        bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, unsigned SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, unsigned DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                        const SmallVectorImpl<unsigned> &Ops,
                        int FrameIndex) const override;

  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB,
                        const SmallVectorImpl<MachineOperand> &Cond,
                        DebugLoc DL) const override;
  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &,
                       const SmallVectorImpl<MachineOperand> &Cond, unsigned,
                       unsigned, int &, int &, int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    DebugLoc DL, unsigned DstReg,
                    const SmallVectorImpl<MachineOperand> &Cond,
                    unsigned TrueReg, unsigned FalseReg) const override;
  void getNoopForMachoTarget(MCInst &NopInst) const override;

  /// analyzeCompare - For a comparison instruction, return the source
  /// registers in SrcReg and SrcReg2, and the value it compares against in
  /// CmpValue. Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;
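
  // Illustrative sketch (assumed caller, e.g. a peephole pass; not part of the
  // original header): analyze a compare and, when possible, fold it into the
  // flag-setting form of the instruction that defines its operand.
  //   unsigned SrcReg, SrcReg2;
  //   int CmpMask, CmpValue;
  //   if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, CmpMask, CmpValue))
  //     TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, CmpMask, CmpValue,
  //                               MRI);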

  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;

private:
  void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
                             MachineBasicBlock *TBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const;
};

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset.  This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     DebugLoc DL, unsigned DestReg, unsigned SrcReg, int Offset,
                     const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false);
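
// Illustrative call sketch (register choices and context are assumptions, not
// from the original header): materialize SP + 48 into X0 during frame
// lowering, using the default MIFlag and leaving NZCV untouched.
//   emitFrameOffset(MBB, MBBI, DL, AArch64::X0, AArch64::SP, 48, TII);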

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, int &Offset,
                              const AArch64InstrInfo *TII);

/// \brief Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// \brief Check if @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be turned
/// into an unscaled operator, whose opcode is returned in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int *EmittableOffset = nullptr);
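
// Illustrative sketch of interpreting the status (assumed caller, e.g. frame
// index elimination; not part of the original header):
//   bool UseUnscaled; unsigned UnscaledOp; int Emittable;
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaled,
//                                          &UnscaledOp, &Emittable);
//   if (Status == AArch64FrameOffsetCannotUpdate)
//     ; // MI cannot take an offset at all; fall back to a scratch register
//   else if (Status & AArch64FrameOffsetIsLegal)
//     ; // the offset fits: rewrite MI with Emittable (using UnscaledOp if set)
//   else if (Status & AArch64FrameOffsetCanUpdate)
//     ; // partial update: Offset now holds the remainder beyond the legal range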

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) { return Opc == AArch64::BR; }

} // end namespace llvm

#endif