//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_AARCH64_ISELLOWERING_H
#define LLVM_TARGET_AARCH64_ISELLOWERING_H

#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"

| 24 | namespace llvm { |
namespace AArch64ISD {
  // AArch64-specific SelectionDAG node types, numbered after the generic ISD
  // opcodes so the two sets never collide. Do NOT reorder the enumerators:
  // their values are assigned sequentially from FIRST_NUMBER.
  enum NodeType {
    // Start the numbering from where ISD NodeType finishes.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,

    // This is a conditional branch which also notes the flag needed
    // (eq/sgt/...). A64 puts this information on the branches rather than
    // compares as LLVM does.
    BR_CC,

    // A node to be selected to an actual call operation: either BL or BLR in
    // the absence of tail calls.
    Call,

    // Indicates a floating-point immediate which fits into the format required
    // by the FMOV instructions. First (and only) operand is the 8-bit encoded
    // value of that immediate.
    FPMOV,

    // Corresponds directly to an EXTR instruction. Operands are an LHS, an
    // RHS and an LSB.
    EXTR,

    // Wraps a load from the GOT, which should always be performed with a
    // 64-bit load instruction. This prevents the DAG combiner folding a
    // truncate to form a smaller memory access.
    GOTLoad,

    // Performs a bitfield insert. Arguments are: the value being inserted
    // into; the value being inserted; least significant bit changed; width of
    // the field.
    BFI,

    // Simply a convenient node inserted during ISelLowering to represent
    // procedure return. Will almost certainly be selected to "RET".
    Ret,

    /// Extracts a field of contiguous bits from the source and sign extends
    /// them into a single register. Arguments are: source; immr; imms. Note
    /// these are pre-encoded since DAG matching can't cope with combining LSB
    /// and Width into these values itself.
    SBFX,

    /// This is an A64-ification of the standard LLVM SELECT_CC operation. The
    /// main difference is that it only has the values and an A64 condition,
    /// which will be produced by a setcc instruction.
    SELECT_CC,

    /// This serves most of the functions of the LLVM SETCC instruction, for
    /// two purposes. First, it prevents optimisations from fiddling with the
    /// compare after we've moved the CondCode information onto the SELECT_CC
    /// or BR_CC instructions. Second, it gives a legal instruction for the
    /// actual comparison.
    ///
    /// It keeps a record of the condition flags asked for because certain
    /// instructions are only valid for a subset of condition codes.
    SETCC,

    // Designates a node which is a tail call: both a call and a return
    // instruction as far as selection is concerned. It should be selected to
    // an unconditional branch. Has the usual plethora of call operands, but:
    // 1st is callee, 2nd is stack adjustment required immediately before
    // branch.
    TC_RETURN,

    // Designates a call used to support the TLS descriptor ABI. The call
    // itself will be indirect ("BLR xN") but a relocation-specifier
    // (".tlsdesccall var") must be attached somehow during code generation.
    // It takes two operands: the callee and the symbol to be relocated
    // against.
    TLSDESCCALL,

    // Leaf node which will be lowered to an appropriate MRS to obtain the
    // thread pointer: TPIDR_EL0.
    THREAD_POINTER,

    /// Extracts a field of contiguous bits from the source and zero extends
    /// them into a single register. Arguments are: source; immr; imms. Note
    /// these are pre-encoded since DAG matching can't cope with combining LSB
    /// and Width into these values itself.
    UBFX,

    // Wraps an address which the ISelLowering phase has decided should be
    // created using the small absolute memory model: i.e. adrp/add or
    // adrp/mem-op. This exists to prevent bare TargetAddresses which may
    // never get selected.
    WrapperSmall
  };
}

class AArch64Subtarget;
class AArch64TargetMachine;

/// AArch64-specific implementation of the TargetLowering interface: describes
/// how LLVM IR constructs are lowered to SelectionDAG nodes for AArch64, and
/// provides custom expansion for operations the target cannot select directly.
class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(AArch64TargetMachine &TM);

  /// Returns a printable name for the given AArch64ISD opcode (used for DAG
  /// debug output).
  const char *getTargetNodeName(unsigned Opcode) const;

  /// Selects the calling-convention assignment function used to marshal
  /// arguments/results for a function with the given calling convention.
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC) const;

  /// TargetLowering hook: lowers a function's incoming (formal) arguments,
  /// producing the SDValues for them in InVals.
  SDValue LowerFormalArguments(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               DebugLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const;

  /// TargetLowering hook: lowers a function's return, consuming OutVals and
  /// emitting the target return sequence (see AArch64ISD::Ret).
  SDValue LowerReturn(SDValue Chain,
                      CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      DebugLoc dl, SelectionDAG &DAG) const;

  /// TargetLowering hook: lowers an outgoing call described by CLI; result
  /// values of the call are returned via InVals.
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const;

  /// Lowers the results of a call node into InVals, threading Chain/InFlag
  /// through the generated nodes.
  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool IsVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          DebugLoc dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals) const;

  /// Helper for varargs lowering — presumably spills the unnamed-argument
  /// registers so va_arg can reach them; confirm against the implementation.
  void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                           DebugLoc DL, SDValue &Chain) const;


  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
                                         CallingConv::ID CalleeCC,
                                         bool IsVarArg,
                                         bool IsCalleeStructRet,
                                         bool IsCallerStructRet,
                                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                                         const SmallVectorImpl<SDValue> &OutVals,
                                         const SmallVectorImpl<ISD::InputArg> &Ins,
                                         SelectionDAG& DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  /// TargetLowering hook: returns the value type produced by a SETCC whose
  /// operands have type VT.
  EVT getSetCCResultType(EVT VT) const;

  /// Returns true if the callee is responsible for popping its stack
  /// arguments (depends on the calling convention and whether tail-call
  /// optimization is enabled).
  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  /// Returns true if CallCC is one of the conventions that may be tail
  /// called.
  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  /// TargetLowering hook: dispatches custom lowering for operations marked
  /// Custom; see the individual LowerXXX methods below.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// TargetLowering hook: returns true if Val can be encoded as an immediate
  /// operand of an integer compare instruction.
  bool isLegalICmpImmediate(int64_t Val) const;

  /// Builds a comparison of LHS and RHS that is directly selectable, placing
  /// the A64 condition code to test in A64cc.
  SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                SDValue &A64cc, SelectionDAG &DAG, DebugLoc &dl) const;

  /// TargetLowering hook: expands pseudo-instructions (marked
  /// usesCustomInserter) into real MachineInstrs; dispatches to the emit*
  /// helpers below.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;

  /// Expands an atomic read-modify-write pseudo of the given access Size,
  /// applying Opcode to combine the old value with the operand.
  MachineBasicBlock *
  emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB,
                   unsigned Size, unsigned Opcode) const;

  /// Expands an atomic min/max pseudo: compares with CmpOp and keeps the
  /// value selected by condition Cond.
  MachineBasicBlock *
  emitAtomicBinaryMinMax(MachineInstr *MI, MachineBasicBlock *BB,
                         unsigned Size, unsigned CmpOp,
                         A64CC::CondCodes Cond) const;

  /// Expands an atomic compare-and-swap pseudo of the given access Size.
  MachineBasicBlock *
  emitAtomicCmpSwap(MachineInstr *MI, MachineBasicBlock *BB,
                    unsigned Size) const;

  /// Expands the f128 conditional-select pseudo (f128 has no native CSEL).
  MachineBasicBlock *
  EmitF128CSEL(MachineInstr *MI, MachineBasicBlock *MBB) const;

  // Custom lowering for individual operations, dispatched from
  // LowerOperation. Each takes the node to lower and returns its replacement.
  SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  // Lowers an f128 operation to a call to the given runtime library function.
  SDValue LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
                          RTLIB::Libcall Call) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  // Emits the TLS-descriptor call sequence (see AArch64ISD::TLSDESCCALL).
  SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, DebugLoc DL,
                           SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;

  /// TargetLowering hook: target-specific DAG combines, run by the DAG
  /// combiner after legalization.
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
  /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
  /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
  /// is expanded to mul + add.
  virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }

  // Inline-assembly constraint support (TargetLowering hooks).

  /// Classifies an inline-asm constraint string.
  ConstraintType getConstraintType(const std::string &Constraint) const;

  /// Scores how well an operand matches a single constraint letter.
  ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &Info,
                                                  const char *Constraint) const;

  /// Lowers an inline-asm operand that matched Constraint into Ops.
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const;

  /// Maps a register constraint ("r", "w", "{x0}", ...) to a register/class.
  std::pair<unsigned, const TargetRegisterClass*>
  getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
private:
  const AArch64Subtarget *Subtarget;     // target feature/ABI information
  const TargetRegisterInfo *RegInfo;     // register file description
  const InstrItineraryData *Itins;       // scheduling itineraries
};
} // namespace llvm

#endif // LLVM_TARGET_AARCH64_ISELLOWERING_H