//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
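  // A representative large-code-model expansion (illustrative; the exact
  // relocation operators depend on the target object format):
  //   movz x0, #:abs_g3:sym
  //   movk x0, #:abs_g2_nc:sym
  //   movk x0, #:abs_g1_nc:sym
  //   movk x0, #:abs_g0_nc:sym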
  CALL, // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
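  // For reference, the ELF TLSDESC sequence this node stands for is roughly
  // (illustrative; "var" is a placeholder symbol):
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, :tlsdesc_lo12:var]
  //   add  x0, x0, :tlsdesc_lo12:var
  //   .tlsdesccall var
  //   blr  x1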
  ADRP,   // Page address of a TargetGlobalAddress operand.
  ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
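  // Together these implement small-code-model address materialisation, e.g.:
  //   adrp x0, sym            // page of sym
  //   add  x0, x0, :lo12:sym  // low 12 bits of sym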
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Floating point comparison
  FCMP,

  // Floating point max and min instructions.
  FMAX,
  FMIN,

  // Scalar extract
  EXTR,
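  // e.g. "extr x0, x1, x2, #16" extracts bits [79:16] of the 128-bit
  // concatenation x1:x2 into x0.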

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT, except the condition operand
  // is a bit mask rather than a boolean per element, so the selected bits
  // within an element need not all be identical.
  BSL,
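  // Bitwise, the result is (mask & src1) | (~mask & src2), matching the NEON
  // "bsl" instruction, which reuses its destination register as the mask.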

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating and rounding shifts by immediate
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,
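  // These correspond to reductions such as "addv h0, v0.8h"; result lanes
  // other than lane 0 must not be relied upon.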

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural for vectors in
  /// big-endian mode: it implies a byte swap, so extra REV instructions get
  /// emitted to compensate. NVCAST reinterprets the data in SIMD vector
  /// registers without any such REVs.
  NVCAST,
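  // e.g. (v4i32 (NVCAST (v2i64 V))) leaves the register bits untouched, where
  // a big-endian BITCAST would imply byte swapping.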

  // Signed/unsigned multiply long: smull, umull.
  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
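  // e.g. LD2post models "ld2 { v0.4s, v1.4s }, [x0], #32", which also writes
  // the incremented base address back to x0.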
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
  /// True when unaligned memory accesses must not be emitted (see
  /// allowsMisalignedMemoryAccesses below).
  bool RequireStrictAlign;

public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// computeKnownBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(EVT LHSTy) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override {
    if (RequireStrictAlign)
      return false;
    // FIXME: True for Cyclone, but not necessarily for others.
    if (Fast)
      *Fast = true;
    return true;
  }

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// getFunctionAlignment - Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isShuffleMaskLegal - Return true if the given shuffle mask can be
  /// codegen'd directly, or if it should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType
  EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool hasLoadLinkedStoreConditional() const override;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;
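  // A minimal sketch of the load-linked/store-conditional loop these hooks
  // let AtomicExpand build (illustrative; instruction choice depends on the
  // requested ordering):
  //   loop: ldaxr x8, [x0]      // load exclusive (acquire)
  //         <update x8>
  //         stlxr w9, x8, [x0]  // store exclusive (release)
  //         cbnz  w9, loop      // retry if the exclusive monitor was lost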

  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicRMWExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  bool combineRepeatedFPDivisors(unsigned NumUsers) const override;

  ConstraintType
  getConstraintType(const std::string &Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               const std::string &Constraint,
                               MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(
      const std::string &ConstraintCode) const override {
    // FIXME: Map different constraints differently.
    return InlineAsm::Constraint_m;
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif