//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

// AArch64-specific SelectionDAG node opcodes. Target nodes are numbered
// starting at ISD::BUILTIN_OP_END; nodes that access memory are numbered from
// ISD::FIRST_TARGET_MEMORY_OPCODE so the generic DAG machinery treats them as
// memory operations.
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,     // Conditional select.
  FCSEL,    // Conditional move instruction.
  CSINV,    // Conditional select invert.
  CSNEG,    // Conditional select negate.
  CSINC,    // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  // Widening multiplies matching the smull/umull instructions.
  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

/// AArch64-specific implementation of the TargetLowering interface. It
/// describes how LLVM IR constructs (calls, returns, varargs, vector ops,
/// atomics, inline asm constraints, ...) are lowered to SelectionDAG nodes
/// for AArch64. Most hooks declared here are implemented in
/// AArch64ISelLowering.cpp.
class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Returns the type to use for shift amounts on scalar shifts.
  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// Returns the name of the given AArch64ISD opcode for debug output.
  const char *getTargetNodeName(unsigned Opcode) const override;

  /// Target-specific DAG combine hook.
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  /// Custom inserter for the pseudo implementing an f128 conditional select.
  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAligment) const override;
  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  // Interleaved memory accesses of up to 4 elements map to ld2/ld3/ld4 and
  // st2/st3/st4.
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  // Atomic lowering hooks: emit ldxr/stxr-style IR sequences and decide which
  // atomic instructions must be expanded in IR.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  // Split callee-saved-register handling is only supported for the
  // CXX_FAST_TLS convention on functions that cannot unwind.
  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  // Register vector types with the legalizer (D = 64-bit, Q = 128-bit NEON
  // registers).
  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  // Call lowering: formal arguments, outgoing calls, and call results.
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  // Per-operation custom lowering routines, dispatched from LowerOperation.
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  unsigned combineRepeatedFPDivisors() const override;

  // Inline assembly constraint handling.
  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;

  // Pre-/post-indexed addressing support for load/store combining.
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
/// Creates the AArch64-specific FastISel instance for the given function
/// lowering state.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif