//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
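  // An assumed expansion under the large code model (illustrative only):
  //   movz x0, #:abs_g3:sym
  //   movk x0, #:abs_g2_nc:sym
  //   movk x0, #:abs_g1_nc:sym
  //   movk x0, #:abs_g0_nc:sym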
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
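  // A sketch of the ELF call sequence this node covers (illustrative; see
  // LowerELFTLSDescCallSeq for the real expansion):
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, #:tlsdesc_lo12:var]
  //   add  x0, x0, :tlsdesc_lo12:var
  //   .tlsdesccall var
  //   blr  x1              // x0 = offset of var from the thread pointer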
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
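  // Under the small code model, ADRP/ADDlow typically pair up as (sketch):
  //   adrp x0, var              // ADRP:   4KB page containing var
  //   add  x0, x0, :lo12:var    // ADDlow: low 12 bits of var's address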
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL,    // Conditional move instruction.
  CSINV,    // Conditional select invert.
  CSNEG,    // Conditional select negate.
  CSINC,    // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left, right, falsecc, cc, flags.
  CCMP,
  CCMN,
  FCCMP,
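  // How a conditional-compare chain evaluates "a == 0 || b == 5", as an
  // assumed sketch:
  //   cmp  w0, #0          // flags := compare(a, 0)
  //   ccmp w1, #5, #4, ne  // if a != 0: flags := compare(b, 5)
  //                        // else:      NZCV := 0b0100, i.e. Z set
  //   // ...a following b.eq is taken iff a == 0 || b == 5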

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
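  // Roughly, BSL(Mask, L, R) = (Mask & L) | (~Mask & R) computed bitwise, so
  // the mask may take some bits of an element from L and others from R (an
  // assumption-level sketch of the semantics).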

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by immediate, with saturating or rounding semantics
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,
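  // e.g. SQSHL_I on a v4i32 is assumed to select "sqshl v0.4s, v0.4s, #n" and
  // SRSHR_I "srshr v0.4s, v0.4s, #n" (illustrative mapping).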
  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,
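  // e.g. a v4i32 UADDV is assumed to map onto "addv s0, v0.4s"; the sum lands
  // in lane 0 and the other result lanes are undefined (sketch).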

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, as it causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,
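  // e.g. (v4i16 (NVCAST (v2i32 V))) re-labels the register's lanes without
  // changing any bits, so no REV is needed (illustrative).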

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;
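
  // A sketch of what lowerInterleavedLoad aims for: a factor-2 interleaved
  // load that IR expresses as one wide load feeding two deinterleaving
  // shufflevectors is assumed to become a single
  //   ld2 { v0.4s, v1.4s }, [x0]
  // (illustrative; selection goes through the NEON ldN intrinsics).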
309
Tim Northover3b0846e2014-05-24 12:50:23 +0000310 bool isLegalAddImmediate(int64_t) const override;
311 bool isLegalICmpImmediate(int64_t) const override;
312
313 EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
314 bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
315 MachineFunction &MF) const override;
316
Sanjay Patel776e59b2015-11-09 19:18:26 +0000317 /// Return true if the addressing mode represented by AM is legal for this
318 /// target, for a load/store of the specified type.
Mehdi Amini0cdec1e2015-07-09 02:09:40 +0000319 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
Matt Arsenaultbd7d80a2015-06-01 05:31:59 +0000320 unsigned AS) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000321
322 /// \brief Return the cost of the scaling factor used in the addressing
323 /// mode represented by AM for this target, for a load/store
324 /// of the specified type.
325 /// If the AM is supported, the return value must be >= 0.
326 /// If the AM is not supported, it returns a negative value.
Mehdi Amini0cdec1e2015-07-09 02:09:40 +0000327 int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
Matt Arsenaultbd7d80a2015-06-01 05:31:59 +0000328 unsigned AS) const override;
Tim Northover3b0846e2014-05-24 12:50:23 +0000329
  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;
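
  // Assumed shape of the loop AtomicExpandPass builds from these two hooks
  // for an atomicrmw (illustrative, not taken from this file):
  //   loop:
  //     ldaxr w8, [x0]        // emitLoadLinked
  //     <compute new value into w9>
  //     stlxr w10, w9, [x0]   // emitStoreConditional; w10 = 0 on success
  //     cbnz  w10, loop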

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
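
  // "Q" above is the AArch64 inline-asm memory constraint for an operand
  // addressed by a single base register with no offset. An assumed usage
  // sketch (not from this file):
  //   int v;
  //   asm volatile("ldxr %w0, %1" : "=r"(v) : "Q"(*ptr));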

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif