//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "aarch64-isel"
15#include "AArch64.h"
16#include "AArch64InstrInfo.h"
17#include "AArch64Subtarget.h"
18#include "AArch64TargetMachine.h"
Tim Northover19254c42013-02-05 13:24:47 +000019#include "Utils/AArch64BaseInfo.h"
Tim Northover72062f52013-01-31 12:12:40 +000020#include "llvm/ADT/APSInt.h"
21#include "llvm/CodeGen/SelectionDAGISel.h"
22#include "llvm/IR/GlobalValue.h"
23#include "llvm/Support/Debug.h"
24#include "llvm/Support/raw_ostream.h"
25
26using namespace llvm;
27
//===--------------------------------------------------------------------===//
/// AArch64 specific code to select AArch64 machine instructions for
/// SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;
  // NOTE(review): TII is initialised in the constructor but never referenced
  // in this file — confirm it is needed before removing.
  const AArch64InstrInfo *TII;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const AArch64InstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
  }

  virtual const char *getPassName() const {
    return "AArch64 Instruction Selection";
  }

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

  /// Match an unsigned, MemSize-byte-scaled 12-bit offset: the constant must
  /// be a multiple of MemSize and the scaled value must fit in 12 bits.
  template<unsigned MemSize>
  bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
    const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
    if (!CN || CN->getZExtValue() % MemSize != 0
        || CN->getZExtValue() / MemSize > 0xfff)
      return false;

    UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
    return true;
  }

  /// Match the power-of-two multiplier of a fixed-point conversion, for a
  /// destination register RegWidth bits wide.
  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  /// Match a floating-point immediate that is exactly +0.0 (Dummy carries no
  /// information; it only satisfies the TableGen pattern).
  bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                unsigned RegWidth);

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps);

  /// Match a constant encodable as a logical immediate for its value type's
  /// register width.
  bool SelectLogicalImm(SDValue N, SDValue &Imm);

  /// Match a power-of-two mask selecting a single bit that exists in a
  /// RegWidth-bit register.
  template<unsigned RegWidth>
  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
    return SelectTSTBOperand(N, FixedPos, RegWidth);
  }

  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);

  /// Select an atomic node to the pseudo-instruction matching its memory
  /// width (one of Op8/Op16/Op32/Op64).
  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  /// Try to materialise a constant with one move-immediate instruction;
  /// returns NULL if no single-instruction encoding exists.
  SDNode *TrySelectToMoveImm(SDNode *N);
  /// Materialise a floating-point constant via a literal-pool load.
  SDNode *LowerToFPLitPool(SDNode *Node);
  /// Materialise an integer constant via a literal-pool load.
  SDNode *SelectToLitPool(SDNode *N);

  SDNode* Select(SDNode*);
private:
};
}
101
102bool
103AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
104 unsigned RegWidth) {
105 const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
106 if (!CN) return false;
107
108 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
109 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
110 // x-register.
111 //
112 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
113 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
114 // integers.
115 bool IsExact;
116
117 // fbits is between 1 and 64 in the worst-case, which means the fmul
118 // could have 2^64 as an actual operand. Need 65 bits of precision.
119 APSInt IntVal(65, true);
120 CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
121
122 // N.b. isPowerOf2 also checks for > 0.
123 if (!IsExact || !IntVal.isPowerOf2()) return false;
124 unsigned FBits = IntVal.logBase2();
125
126 // Checks above should have guaranteed that we haven't lost information in
127 // finding FBits, but it must still be in range.
128 if (FBits == 0 || FBits > RegWidth) return false;
129
130 FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
131 return true;
132}
133
134bool
135AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
Tim Northoverdfe076a2013-02-05 13:24:56 +0000136 char ConstraintCode,
137 std::vector<SDValue> &OutOps) {
Tim Northover72062f52013-01-31 12:12:40 +0000138 switch (ConstraintCode) {
139 default: llvm_unreachable("Unrecognised AArch64 memory constraint");
140 case 'm':
141 // FIXME: more freedom is actually permitted for 'm'. We can go
142 // hunting for a base and an offset if we want. Of course, since
143 // we don't really know how the operand is going to be used we're
144 // probably restricted to the load/store pair's simm7 as an offset
145 // range anyway.
146 case 'Q':
147 OutOps.push_back(Op);
148 }
149
150 return false;
151}
152
153bool
154AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
155 ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
156 if (!Imm || !Imm->getValueAPF().isPosZero())
157 return false;
Tim Northoverdfe076a2013-02-05 13:24:56 +0000158
Tim Northover72062f52013-01-31 12:12:40 +0000159 // Doesn't actually carry any information, but keeps TableGen quiet.
160 Dummy = CurDAG->getTargetConstant(0, MVT::i32);
161 return true;
162}
163
164bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
165 uint32_t Bits;
166 uint32_t RegWidth = N.getValueType().getSizeInBits();
167
168 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
169 if (!CN) return false;
170
171 if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
172 return false;
173
174 Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
175 return true;
176}
177
/// Attempt to materialise the integer constant in Node with a single MOVZ,
/// MOVN or ORR-with-zero-register instruction. Returns the new machine node,
/// or NULL if no single-instruction encoding exists.
SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
  SDNode *ResNode;
  DebugLoc dl = Node->getDebugLoc();
  EVT DestType = Node->getValueType(0);
  unsigned DestWidth = DestType.getSizeInBits();

  unsigned MOVOpcode;
  EVT MOVType;
  int UImm16, Shift;
  uint32_t LogicalBits;

  uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
  if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
    // The value is a shifted 16-bit chunk: MOVZ at the destination width.
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
  } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
    // The bitwise inverse is a shifted 16-bit chunk: MOVN.
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
  } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
    // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we can
    // use a 32-bit instruction: "movn w0, 0xedbc".
    MOVType = MVT::i32;
    MOVOpcode = AArch64::MOVNwii;
  } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
    // Encodable as a logical immediate: ORR with the zero register.
    MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
    uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;

    return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
                              CurDAG->getRegister(ZR, DestType),
                              CurDAG->getTargetConstant(LogicalBits, MVT::i32));
  } else {
    // Can't handle it in one instruction. There's scope for permitting two (or
    // more) instructions, but that'll need more thought.
    return NULL;
  }

  ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
                              CurDAG->getTargetConstant(UImm16, MVT::i32),
                              CurDAG->getTargetConstant(Shift, MVT::i32));

  if (MOVType != DestType) {
    // A 32-bit MOVN was used for a 64-bit destination: widen the result by
    // inserting it into the low 32 bits of an implicitly-zeroed x-register.
    // NOTE(review): only result 0 of this node is ever used; the extra
    // MVT::i32/MVT::Other result types look unnecessary — confirm.
    ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                          MVT::i64, MVT::i32, MVT::Other,
                          CurDAG->getTargetConstant(0, MVT::i64),
                          SDValue(ResNode, 0),
                          CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
  }

  return ResNode;
}
228
/// Materialise an integer constant by loading it from a constant-pool entry,
/// choosing the smallest entry (32- or 64-bit) that can reproduce the value
/// via a plain, zero-extending or sign-extending load.
SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
  DebugLoc DL = Node->getDebugLoc();
  uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
  int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
  EVT DestType = Node->getValueType(0);
  EVT PtrVT = TLI.getPointerTy();

  // Since we may end up loading a 64-bit constant from a 32-bit entry the
  // constant in the pool may have a different type to the eventual node.
  ISD::LoadExtType Extension;
  EVT MemType;

  assert((DestType == MVT::i64 || DestType == MVT::i32)
         && "Only expect integer constants at the moment");

  if (DestType == MVT::i32) {
    // 32-bit destination: load the entry as-is.
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i32;
  } else if (UnsignedVal <= UINT32_MAX) {
    // Zero-extending a 32-bit entry reproduces the 64-bit value exactly.
    Extension = ISD::ZEXTLOAD;
    MemType = MVT::i32;
  } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
    // Sign-extending a 32-bit entry reproduces the 64-bit value exactly.
    Extension = ISD::SEXTLOAD;
    MemType = MVT::i32;
  } else {
    // No 32-bit representation works: use a full 64-bit entry.
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i64;
  }

  Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
                                                  MemType.getSizeInBits()),
                                  UnsignedVal);
  SDValue PoolAddr;
  unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(CV->getType());
  // Form the entry's address from a full reference and a LO12-offset
  // reference, combined by the small-code-model wrapper node.
  PoolAddr = CurDAG->getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                             CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0,
                                                           AArch64II::MO_NO_FLAG),
                             CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0,
                                                           AArch64II::MO_LO12),
                             CurDAG->getConstant(Alignment, MVT::i32));

  return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
                            PoolAddr,
                            MachinePointerInfo::getConstantPool(), MemType,
                            /* isVolatile = */ false,
                            /* isNonTemporal = */ false,
                            Alignment).getNode();
}
277
Tim Northover1e883932013-02-15 09:33:43 +0000278SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
279 DebugLoc DL = Node->getDebugLoc();
Tim Northover72062f52013-01-31 12:12:40 +0000280 const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
Tim Northover1e883932013-02-15 09:33:43 +0000281 EVT PtrVT = TLI.getPointerTy();
Tim Northover72062f52013-01-31 12:12:40 +0000282 EVT DestType = Node->getValueType(0);
283
Tim Northover1e883932013-02-15 09:33:43 +0000284 unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(FV->getType());
285 SDValue PoolAddr;
Tim Northover72062f52013-01-31 12:12:40 +0000286
Tim Northover1e883932013-02-15 09:33:43 +0000287 assert(TM.getCodeModel() == CodeModel::Small &&
288 "Only small code model supported");
289 PoolAddr = CurDAG->getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
290 CurDAG->getTargetConstantPool(FV, PtrVT, 0, 0,
291 AArch64II::MO_NO_FLAG),
292 CurDAG->getTargetConstantPool(FV, PtrVT, 0, 0,
293 AArch64II::MO_LO12),
294 CurDAG->getConstant(Alignment, MVT::i32));
Tim Northover72062f52013-01-31 12:12:40 +0000295
Tim Northover1e883932013-02-15 09:33:43 +0000296 return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
297 MachinePointerInfo::getConstantPool(),
298 /* isVolatile = */ false,
299 /* isNonTemporal = */ false,
300 /* isInvariant = */ true,
301 Alignment).getNode();
Tim Northover72062f52013-01-31 12:12:40 +0000302}
303
304bool
305AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
306 unsigned RegWidth) {
307 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
308 if (!CN) return false;
309
310 uint64_t Val = CN->getZExtValue();
311
312 if (!isPowerOf2_64(Val)) return false;
313
314 unsigned TestedBit = Log2_64(Val);
315 // Checks above should have guaranteed that we haven't lost information in
316 // finding TestedBit, but it must still be in range.
317 if (TestedBit >= RegWidth) return false;
318
319 FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
320 return true;
321}
322
Tim Northover211ffd22013-04-08 08:40:41 +0000323SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
324 unsigned Op16,unsigned Op32,
325 unsigned Op64) {
326 // Mostly direct translation to the given operations, except that we preserve
327 // the AtomicOrdering for use later on.
328 AtomicSDNode *AN = cast<AtomicSDNode>(Node);
329 EVT VT = AN->getMemoryVT();
330
331 unsigned Op;
332 if (VT == MVT::i8)
333 Op = Op8;
334 else if (VT == MVT::i16)
335 Op = Op16;
336 else if (VT == MVT::i32)
337 Op = Op32;
338 else if (VT == MVT::i64)
339 Op = Op64;
340 else
341 llvm_unreachable("Unexpected atomic operation");
342
343 SmallVector<SDValue, 4> Ops;
344 for (unsigned i = 1; i < AN->getNumOperands(); ++i)
345 Ops.push_back(AN->getOperand(i));
346
347 Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
348 Ops.push_back(AN->getOperand(0)); // Chain moves to the end
349
350 return CurDAG->SelectNodeTo(Node, Op,
351 AN->getValueType(0), MVT::Other,
352 &Ops[0], Ops.size());
353}
354
/// Top-level selection hook: handles the node classes that need manual
/// treatment (atomics, frame indexes, constant-pool references and constant
/// materialisation), then defers everything else to the TableGen-generated
/// matcher via SelectCode.
SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");

  if (Node->isMachineOpcode()) {
    // Already a machine node (e.g. selected earlier); nothing to do.
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    return NULL;
  }

  switch (Node->getOpcode()) {
  // Atomics are selected by hand so SelectAtomic can preserve the
  // AtomicOrdering as an operand on the chosen pseudo-instruction.
  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_ADD_I8,
                        AArch64::ATOMIC_LOAD_ADD_I16,
                        AArch64::ATOMIC_LOAD_ADD_I32,
                        AArch64::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_SUB_I8,
                        AArch64::ATOMIC_LOAD_SUB_I16,
                        AArch64::ATOMIC_LOAD_SUB_I32,
                        AArch64::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_AND_I8,
                        AArch64::ATOMIC_LOAD_AND_I16,
                        AArch64::ATOMIC_LOAD_AND_I32,
                        AArch64::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_OR_I8,
                        AArch64::ATOMIC_LOAD_OR_I16,
                        AArch64::ATOMIC_LOAD_OR_I32,
                        AArch64::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_XOR_I8,
                        AArch64::ATOMIC_LOAD_XOR_I16,
                        AArch64::ATOMIC_LOAD_XOR_I32,
                        AArch64::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_NAND_I8,
                        AArch64::ATOMIC_LOAD_NAND_I16,
                        AArch64::ATOMIC_LOAD_NAND_I32,
                        AArch64::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MIN_I8,
                        AArch64::ATOMIC_LOAD_MIN_I16,
                        AArch64::ATOMIC_LOAD_MIN_I32,
                        AArch64::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MAX_I8,
                        AArch64::ATOMIC_LOAD_MAX_I16,
                        AArch64::ATOMIC_LOAD_MAX_I32,
                        AArch64::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMIN_I8,
                        AArch64::ATOMIC_LOAD_UMIN_I16,
                        AArch64::ATOMIC_LOAD_UMIN_I32,
                        AArch64::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMAX_I8,
                        AArch64::ATOMIC_LOAD_UMAX_I16,
                        AArch64::ATOMIC_LOAD_UMAX_I32,
                        AArch64::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_SWAP_I8,
                        AArch64::ATOMIC_SWAP_I16,
                        AArch64::ATOMIC_SWAP_I32,
                        AArch64::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_CMP_SWAP_I8,
                        AArch64::ATOMIC_CMP_SWAP_I16,
                        AArch64::ATOMIC_CMP_SWAP_I32,
                        AArch64::ATOMIC_CMP_SWAP_I64);
  case ISD::FrameIndex: {
    // Compute the frame address as "ADD <fi>, #0" on the target frame index.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    EVT PtrTy = TLI.getPointerTy();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
    return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
                                TFI, CurDAG->getTargetConstant(0, PtrTy));
  }
  case ISD::ConstantPool: {
    // Constant pools are fine, just create a Target entry.
    ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
    const Constant *C = CN->getConstVal();
    SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));

    ReplaceUses(SDValue(Node, 0), CP);
    return NULL;
  }
  case ISD::Constant: {
    SDNode *ResNode = 0;
    if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
      // XZR and WZR are probably even better than an actual move: most of the
      // time they can be folded into another instruction with *no* cost.

      EVT Ty = Node->getValueType(0);
      assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
      uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
      ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                       Node->getDebugLoc(),
                                       Register, Ty).getNode();
    }

    // Next best option is a move-immediate, see if we can do that.
    if (!ResNode) {
      ResNode = TrySelectToMoveImm(Node);
    }

    if (ResNode)
      return ResNode;

    // If even that fails we fall back to a lit-pool entry at the moment. Future
    // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
    ResNode = SelectToLitPool(Node);
    assert(ResNode && "We need *some* way to materialise a constant");

    // We want to continue selection at this point since the litpool access
    // generated used generic nodes for simplicity.
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
    Node = ResNode;
    break;
  }
  case ISD::ConstantFP: {
    if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
      // FMOV will take care of it from TableGen
      break;
    }

    SDNode *ResNode = LowerToFPLitPool(Node);
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));

    // We want to continue selection at this point since the litpool access
    // generated used generic nodes for simplicity.
    Node = ResNode;
    break;
  }
  default:
    break; // Let generic code handle it
  }

  // Hand (the possibly-replaced) Node to the autogenerated matcher.
  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << "\n");

  return ResNode;
}
515
516/// This pass converts a legalized DAG into a AArch64-specific DAG, ready for
517/// instruction scheduling.
518FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
519 CodeGenOpt::Level OptLevel) {
520 return new AArch64DAGToDAGISel(TM, OptLevel);
521}