blob: 7c10cb1166946c1d27186d7b529b476fcc69126e [file] [log] [blame]
Tim Northover3b0846e2014-05-24 12:50:23 +00001//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that AArch64 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
Benjamin Kramera7c40ef2014-08-13 16:26:38 +000015#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
Tim Northover3b0846e2014-05-24 12:50:23 +000017
Joseph Tremouletf748c892015-11-07 01:11:31 +000018#include "AArch64.h"
Tim Northover3b0846e2014-05-24 12:50:23 +000019#include "llvm/CodeGen/CallingConvLower.h"
20#include "llvm/CodeGen/SelectionDAG.h"
21#include "llvm/IR/CallingConv.h"
Chad Rosier54390052015-02-23 19:15:16 +000022#include "llvm/IR/Instruction.h"
Tim Northover3b0846e2014-05-24 12:50:23 +000023#include "llvm/Target/TargetLowering.h"
24
25namespace llvm {
26
namespace AArch64ISD {

// AArch64-specific SelectionDAG node types. Values up to (but not including)
// LD2post are ordinary target nodes starting at ISD::BUILTIN_OP_END; the
// load/store nodes at the end are memory opcodes and therefore start at
// ISD::FIRST_TARGET_MEMORY_OPCODE.
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD
217
218class AArch64Subtarget;
219class AArch64TargetMachine;
220
/// AArch64 implementation of the TargetLowering interface: describes how LLVM
/// IR constructs are lowered to SelectionDAG nodes for AArch64, and provides
/// the target hooks the DAG combiner, legalizer and FastISel query.
class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// computeKnownBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// getFunctionAlignment - Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isShuffleMaskLegal - Return true if the given shuffle mask can be
  /// codegen'd directly, or if it should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAligment) const override;
  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  // Atomics lowering: LL/SC emission and expansion policy hooks.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  // Calling-convention lowering.
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  // Per-node custom lowering routines dispatched from LowerOperation.
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  unsigned combineRepeatedFPDivisors() const override;

  // Inline-assembly constraint handling.
  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  // Pre/post-indexed addressing-mode recognition.
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};
541
namespace AArch64 {
// Factory for the AArch64 FastISel instance used by the fast instruction
// selector; forwarded to by AArch64TargetLowering::createFastISel above.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64
546
547} // end namespace llvm
548
Benjamin Kramera7c40ef2014-08-13 16:26:38 +0000549#endif