blob: b815f55da6b3cfa823745d5e32f53e7e2a3790a5 [file] [log] [blame]
//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
14
Benjamin Kramera7c40ef2014-08-13 16:26:38 +000015#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
Tim Northover3b0846e2014-05-24 12:50:23 +000017
18#include "llvm/CodeGen/CallingConvLower.h"
19#include "llvm/CodeGen/SelectionDAG.h"
20#include "llvm/IR/CallingConv.h"
Chad Rosier54390052015-02-23 19:15:16 +000021#include "llvm/IR/Instruction.h"
Tim Northover3b0846e2014-05-24 12:50:23 +000022#include "llvm/Target/TargetLowering.h"
23
24namespace llvm {
25
namespace AArch64ISD {

// AArch64-specific SelectionDAG node kinds. Enumerators are sequential, so
// their order is significant: everything up to the LD*/ST* post-increment
// nodes is a plain target node, while the nodes anchored at
// ISD::FIRST_TARGET_MEMORY_OPCODE are treated as memory opcodes.
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD
216
217class AArch64Subtarget;
218class AArch64TargetMachine;
219
220class AArch64TargetLowering : public TargetLowering {
Tim Northover3b0846e2014-05-24 12:50:23 +0000221public:
Eric Christopher905f12d2015-01-29 00:19:42 +0000222 explicit AArch64TargetLowering(const TargetMachine &TM,
223 const AArch64Subtarget &STI);
Tim Northover3b0846e2014-05-24 12:50:23 +0000224
Robin Morisset039781e2014-08-29 21:53:01 +0000225 /// Selects the correct CCAssignFn for a given CallingConvention value.
Tim Northover3b0846e2014-05-24 12:50:23 +0000226 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
227
228 /// computeKnownBitsForTargetNode - Determine which of the bits specified in
229 /// Mask are known to be either zero or one and return them in the
230 /// KnownZero/KnownOne bitsets.
231 void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
232 APInt &KnownOne, const SelectionDAG &DAG,
233 unsigned Depth = 0) const override;
234
Mehdi Aminieaabc512015-07-09 15:12:23 +0000235 MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000236
Matt Arsenault6f2a5262014-07-27 17:46:40 +0000237 /// allowsMisalignedMemoryAccesses - Returns true if the target allows
Sanjay Patel08efcd92015-01-28 22:37:32 +0000238 /// unaligned memory accesses of the specified type.
Matt Arsenault6f2a5262014-07-27 17:46:40 +0000239 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
240 unsigned Align = 1,
Akira Hatanakaf53b0402015-07-29 14:17:26 +0000241 bool *Fast = nullptr) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000242
243 /// LowerOperation - Provide custom lowering hooks for some operations.
244 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
245
246 const char *getTargetNodeName(unsigned Opcode) const override;
247
248 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
249
250 /// getFunctionAlignment - Return the Log2 alignment of this function.
251 unsigned getFunctionAlignment(const Function *F) const;
252
Tim Northover3b0846e2014-05-24 12:50:23 +0000253 /// Returns true if a cast between SrcAS and DestAS is a noop.
254 bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
255 // Addrspacecasts are always noops.
256 return true;
257 }
258
259 /// createFastISel - This method returns a target specific FastISel object,
260 /// or null if the target does not support "fast" ISel.
261 FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
262 const TargetLibraryInfo *libInfo) const override;
263
264 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
265
266 bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
267
268 /// isShuffleMaskLegal - Return true if the given shuffle mask can be
269 /// codegen'd directly, or if it should be stack expanded.
270 bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
271
272 /// getSetCCResultType - Return the ISD::SETCC ValueType
Mehdi Amini44ede332015-07-09 02:09:04 +0000273 EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
274 EVT VT) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000275
276 SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
277
278 MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
279 MachineBasicBlock *BB) const;
280
281 MachineBasicBlock *
282 EmitInstrWithCustomInserter(MachineInstr *MI,
283 MachineBasicBlock *MBB) const override;
284
285 bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
286 unsigned Intrinsic) const override;
287
288 bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
289 bool isTruncateFree(EVT VT1, EVT VT2) const override;
290
Chad Rosier54390052015-02-23 19:15:16 +0000291 bool isProfitableToHoist(Instruction *I) const override;
292
Tim Northover3b0846e2014-05-24 12:50:23 +0000293 bool isZExtFree(Type *Ty1, Type *Ty2) const override;
294 bool isZExtFree(EVT VT1, EVT VT2) const override;
295 bool isZExtFree(SDValue Val, EVT VT2) const override;
296
297 bool hasPairedLoad(Type *LoadedType,
298 unsigned &RequiredAligment) const override;
299 bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
300
Hao Liu7ec8ee32015-06-26 02:32:07 +0000301 unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
302
303 bool lowerInterleavedLoad(LoadInst *LI,
304 ArrayRef<ShuffleVectorInst *> Shuffles,
305 ArrayRef<unsigned> Indices,
306 unsigned Factor) const override;
307 bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
308 unsigned Factor) const override;
309
Tim Northover3b0846e2014-05-24 12:50:23 +0000310 bool isLegalAddImmediate(int64_t) const override;
311 bool isLegalICmpImmediate(int64_t) const override;
312
313 EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
314 bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
315 MachineFunction &MF) const override;
316
317 /// isLegalAddressingMode - Return true if the addressing mode represented
318 /// by AM is legal for this target, for a load/store of the specified type.
Mehdi Amini0cdec1e2015-07-09 02:09:40 +0000319 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
Matt Arsenaultbd7d80a2015-06-01 05:31:59 +0000320 unsigned AS) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000321
322 /// \brief Return the cost of the scaling factor used in the addressing
323 /// mode represented by AM for this target, for a load/store
324 /// of the specified type.
325 /// If the AM is supported, the return value must be >= 0.
326 /// If the AM is not supported, it returns a negative value.
Mehdi Amini0cdec1e2015-07-09 02:09:40 +0000327 int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
Matt Arsenaultbd7d80a2015-06-01 05:31:59 +0000328 unsigned AS) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000329
330 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
331 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
332 /// expanded to FMAs when this method returns true, otherwise fmuladd is
333 /// expanded to fmul + fadd.
334 bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
335
336 const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
337
338 /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
339 bool isDesirableToCommuteWithShift(const SDNode *N) const override;
340
341 /// \brief Returns true if it is beneficial to convert a load of a constant
342 /// to just the constant itself.
343 bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
344 Type *Ty) const override;
345
346 Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
347 AtomicOrdering Ord) const override;
348 Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
349 Value *Addr, AtomicOrdering Ord) const override;
350
Ahmed Bougacha07a844d2015-09-22 17:21:44 +0000351 void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
352
Ahmed Bougacha52468672015-09-11 17:08:28 +0000353 TargetLoweringBase::AtomicExpansionKind
354 shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
Robin Morisseted3d48f2014-09-03 21:29:59 +0000355 bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
Ahmed Bougacha9d677132015-09-11 17:08:17 +0000356 TargetLoweringBase::AtomicExpansionKind
JF Bastienf14889e2015-03-04 15:47:57 +0000357 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000358
Ahmed Bougacha52468672015-09-11 17:08:28 +0000359 bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
360
Akira Hatanakae5b6e0d2014-07-25 19:31:34 +0000361 bool useLoadStackGuardNode() const override;
Chandler Carruth9d010ff2014-07-03 00:23:43 +0000362 TargetLoweringBase::LegalizeTypeAction
363 getPreferredVectorAction(EVT VT) const override;
364
Tim Northover3b0846e2014-05-24 12:50:23 +0000365private:
Quentin Colombet6843ac42015-03-31 20:52:32 +0000366 bool isExtFreeImpl(const Instruction *Ext) const override;
367
Tim Northover3b0846e2014-05-24 12:50:23 +0000368 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
369 /// make the right decision when generating code for different targets.
370 const AArch64Subtarget *Subtarget;
371
372 void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
373 void addDRTypeForNEON(MVT VT);
374 void addQRTypeForNEON(MVT VT);
375
376 SDValue
377 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
378 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
379 SelectionDAG &DAG,
380 SmallVectorImpl<SDValue> &InVals) const override;
381
382 SDValue LowerCall(CallLoweringInfo & /*CLI*/,
383 SmallVectorImpl<SDValue> &InVals) const override;
384
385 SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
386 CallingConv::ID CallConv, bool isVarArg,
387 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
388 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
389 bool isThisReturn, SDValue ThisVal) const;
390
Adhemerval Zanella7bc33192015-07-28 13:03:31 +0000391 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
392
Tim Northover3b0846e2014-05-24 12:50:23 +0000393 bool isEligibleForTailCallOptimization(
394 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
395 bool isCalleeStructRet, bool isCallerStructRet,
396 const SmallVectorImpl<ISD::OutputArg> &Outs,
397 const SmallVectorImpl<SDValue> &OutVals,
398 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
399
400 /// Finds the incoming stack arguments which overlap the given fixed stack
401 /// object and incorporates their load into the current chain. This prevents
402 /// an upcoming store from clobbering the stack argument before it's used.
403 SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
404 MachineFrameInfo *MFI, int ClobberedFI) const;
405
406 bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
407
408 bool IsTailCallConvention(CallingConv::ID CallCC) const;
409
410 void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
411 SDValue &Chain) const;
412
413 bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
414 bool isVarArg,
415 const SmallVectorImpl<ISD::OutputArg> &Outs,
416 LLVMContext &Context) const override;
417
418 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
419 const SmallVectorImpl<ISD::OutputArg> &Outs,
420 const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
421 SelectionDAG &DAG) const override;
422
423 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
424 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
425 SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
426 SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
Kristof Beylsaea84612015-03-04 09:12:08 +0000427 SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
428 SelectionDAG &DAG) const;
Tim Northover3b0846e2014-05-24 12:50:23 +0000429 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
430 SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
431 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
432 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
Matthias Braunb6ac8fa2015-04-07 17:33:05 +0000433 SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
434 SDValue TVal, SDValue FVal, SDLoc dl,
435 SelectionDAG &DAG) const;
Tim Northover3b0846e2014-05-24 12:50:23 +0000436 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
437 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
438 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
439 SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
440 SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
441 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
442 SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
443 SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
444 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
445 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
446 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
447 SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
448 SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
449 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
450 SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
451 SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
452 SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
453 SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
454 SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
455 SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
456 SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
457 SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
458 RTLIB::Libcall Call) const;
459 SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
460 SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
461 SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
462 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
463 SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
464 SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
465 SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
466 SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
467 SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
468
Chad Rosier17020f92014-07-23 14:57:52 +0000469 SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
Benjamin Kramer8c90fd72014-09-03 11:41:21 +0000470 std::vector<SDNode *> *Created) const override;
Sanjay Patel1dd15592015-07-28 23:05:48 +0000471 unsigned combineRepeatedFPDivisors() const override;
Chad Rosier17020f92014-07-23 14:57:52 +0000472
Benjamin Kramer9bfb6272015-07-05 19:29:18 +0000473 ConstraintType getConstraintType(StringRef Constraint) const override;
Pat Gavlina717f252015-07-09 17:40:29 +0000474 unsigned getRegisterByName(const char* RegName, EVT VT,
475 SelectionDAG &DAG) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000476
477 /// Examine constraint string and operand type and determine a weight value.
478 /// The operand object must already have been set up with the operand type.
479 ConstraintWeight
480 getSingleConstraintMatchWeight(AsmOperandInfo &info,
481 const char *constraint) const override;
482
483 std::pair<unsigned, const TargetRegisterClass *>
Eric Christopher11e4df72015-02-26 22:38:43 +0000484 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +0000485 StringRef Constraint, MVT VT) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000486 void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
487 std::vector<SDValue> &Ops,
488 SelectionDAG &DAG) const override;
489
Benjamin Kramer9bfb6272015-07-05 19:29:18 +0000490 unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
Daniel Sandersf731eee2015-03-23 11:33:15 +0000491 if (ConstraintCode == "Q")
492 return InlineAsm::Constraint_Q;
493 // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
494 // followed by llvm_unreachable so we'll leave them unimplemented in
495 // the backend for now.
496 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
Daniel Sandersbf5b80f2015-03-16 13:13:41 +0000497 }
498
Tim Northover3b0846e2014-05-24 12:50:23 +0000499 bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
500 bool mayBeEmittedAsTailCall(CallInst *CI) const override;
501 bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
502 ISD::MemIndexedMode &AM, bool &IsInc,
503 SelectionDAG &DAG) const;
504 bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
505 ISD::MemIndexedMode &AM,
506 SelectionDAG &DAG) const override;
507 bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
508 SDValue &Offset, ISD::MemIndexedMode &AM,
509 SelectionDAG &DAG) const override;
510
511 void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
512 SelectionDAG &DAG) const override;
Tim Northover3c55cca2014-11-27 21:02:42 +0000513
514 bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
515 CallingConv::ID CallConv,
Craig Topper44586dc2014-11-28 03:58:26 +0000516 bool isVarArg) const override;
Matthias Braunaf7d7702015-07-16 20:02:37 +0000517
518 bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000519};
520
namespace AArch64 {
// Factory for the AArch64 FastISel implementation; mirrors
// AArch64TargetLowering::createFastISel above (which may return null when
// "fast" ISel is unsupported).
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64
525
526} // end namespace llvm
527
Benjamin Kramera7c40ef2014-08-13 16:26:38 +0000528#endif