blob: 2f5b5f36718e5b9cb575d8446487374873810281 [file] [log] [blame]
Tim Northover3b0846e2014-05-24 12:50:23 +00001//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that AArch64 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
Benjamin Kramera7c40ef2014-08-13 16:26:38 +000015#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
Tim Northover3b0846e2014-05-24 12:50:23 +000017
Joseph Tremouletf748c892015-11-07 01:11:31 +000018#include "AArch64.h"
Tim Northover3b0846e2014-05-24 12:50:23 +000019#include "llvm/CodeGen/CallingConvLower.h"
20#include "llvm/CodeGen/SelectionDAG.h"
21#include "llvm/IR/CallingConv.h"
Chad Rosier54390052015-02-23 19:15:16 +000022#include "llvm/IR/Instruction.h"
Tim Northover3b0846e2014-05-24 12:50:23 +000023#include "llvm/Target/TargetLowering.h"
24
25namespace llvm {
26
namespace AArch64ISD {

// AArch64-specific SelectionDAG node opcodes. Values start after
// ISD::BUILTIN_OP_END; the trailing group of NEON post-increment load/store
// nodes starts at ISD::FIRST_TARGET_MEMORY_OPCODE because those nodes carry
// memory operands.
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,     // Conditional select.
  FCSEL,    // Conditional move instruction.
  CSINV,    // Conditional select invert.
  CSNEG,    // Conditional select negate.
  CSINC,    // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  // Widening multiplies (signed / unsigned).
  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD
217
class AArch64Subtarget;
class AArch64TargetMachine;

/// AArch64 implementation of the TargetLowering interface: defines how LLVM
/// code is lowered into a selection DAG for this target (custom operation
/// lowering, calling-convention handling, inline-asm constraints, atomics
/// expansion policy, and addressing-mode legality).
class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// Returns the printable name for an AArch64ISD node opcode.
  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  // NOTE(review): "RequiredAligment" is a typo for "RequiredAlignment" —
  // declaration-only parameter name, kept as-is to match the definition.
  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAligment) const override;
  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  // IR-level atomics support: emit load-linked/store-conditional sequences.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  // Policy hooks controlling which atomic operations are expanded in IR.
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  // Split callee-saved register handling is only supported for CXX_FAST_TLS
  // functions that cannot unwind.
  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  // Register NEON vector types and their promoted bitwise-op types.
  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  // Calling-convention lowering hooks.
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  // Per-operation custom lowering routines, dispatched from LowerOperation.
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  unsigned combineRepeatedFPDivisors() const override;

  // Inline-asm constraint handling.
  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  // Shared helper for the pre-/post-indexed address hooks below.
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};
555
namespace AArch64 {
/// Factory for the AArch64 FastISel instruction selector; returned by
/// AArch64TargetLowering::createFastISel above.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64
560
561} // end namespace llvm
562
Benjamin Kramera7c40ef2014-08-13 16:26:38 +0000563#endif