blob: ed49f6fc6349244b75e04bf11e248ae3de8a3272 [file] [log] [blame]
Tim Northover3b0846e2014-05-24 12:50:23 +00001//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines an instruction selector for the AArch64 target.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64TargetMachine.h"
15#include "MCTargetDesc/AArch64AddressingModes.h"
16#include "llvm/ADT/APSInt.h"
17#include "llvm/CodeGen/SelectionDAGISel.h"
18#include "llvm/IR/Function.h" // To access function attributes.
19#include "llvm/IR/GlobalValue.h"
20#include "llvm/IR/Intrinsics.h"
21#include "llvm/Support/Debug.h"
22#include "llvm/Support/ErrorHandling.h"
23#include "llvm/Support/MathExtras.h"
24#include "llvm/Support/raw_ostream.h"
25
26using namespace llvm;
27
28#define DEBUG_TYPE "aarch64-isel"
29
30//===--------------------------------------------------------------------===//
31/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
32/// instructions for SelectionDAG operations.
33///
34namespace {
35
36class AArch64DAGToDAGISel : public SelectionDAGISel {
37 AArch64TargetMachine &TM;
38
39 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
40 /// make the right decision when generating code for different targets.
41 const AArch64Subtarget *Subtarget;
42
43 bool ForCodeSize;
44
45public:
46 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
47 CodeGenOpt::Level OptLevel)
48 : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
49 ForCodeSize(false) {}
50
51 const char *getPassName() const override {
52 return "AArch64 Instruction Selection";
53 }
54
55 bool runOnMachineFunction(MachineFunction &MF) override {
Tim Northover3b0846e2014-05-24 12:50:23 +000056 ForCodeSize =
Duncan P. N. Exon Smith003bb7d2015-02-14 02:09:06 +000057 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
58 MF.getFunction()->hasFnAttribute(Attribute::MinSize);
Eric Christopher1e513342015-01-30 23:46:40 +000059 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
Tim Northover3b0846e2014-05-24 12:50:23 +000060 return SelectionDAGISel::runOnMachineFunction(MF);
61 }
62
63 SDNode *Select(SDNode *Node) override;
64
65 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
66 /// inline asm expressions.
67 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
68 char ConstraintCode,
69 std::vector<SDValue> &OutOps) override;
70
71 SDNode *SelectMLAV64LaneV128(SDNode *N);
72 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
73 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
74 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
76 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, false, Reg, Shift);
78 }
79 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
80 return SelectShiftedRegister(N, true, Reg, Shift);
81 }
82 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed(N, 1, Base, OffImm);
84 }
85 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed(N, 2, Base, OffImm);
87 }
88 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed(N, 4, Base, OffImm);
90 }
91 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed(N, 8, Base, OffImm);
93 }
94 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed(N, 16, Base, OffImm);
96 }
97 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
99 }
100 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
102 }
103 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
105 }
106 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
108 }
109 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
111 }
112
113 template<int Width>
114 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
115 SDValue &SignExtend, SDValue &DoShift) {
116 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
117 }
118
119 template<int Width>
120 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
121 SDValue &SignExtend, SDValue &DoShift) {
122 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
123 }
124
125
126 /// Form sequences of consecutive 64/128-bit registers for use in NEON
127 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
128 /// between 1 and 4 elements. If it contains a single element that is returned
129 /// unchanged; otherwise a REG_SEQUENCE value is returned.
130 SDValue createDTuple(ArrayRef<SDValue> Vecs);
131 SDValue createQTuple(ArrayRef<SDValue> Vecs);
132
133 /// Generic helper for the createDTuple/createQTuple
134 /// functions. Those should almost always be called instead.
Benjamin Kramerea68a942015-02-19 15:26:17 +0000135 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
136 const unsigned SubRegs[]);
Tim Northover3b0846e2014-05-24 12:50:23 +0000137
138 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
139
140 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
141
142 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
143 unsigned SubRegIdx);
144 SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
145 unsigned SubRegIdx);
146 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
147 SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
148
149 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
150 SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
151 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
152 SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
153
Tim Northover3b0846e2014-05-24 12:50:23 +0000154 SDNode *SelectBitfieldExtractOp(SDNode *N);
155 SDNode *SelectBitfieldInsertOp(SDNode *N);
156
157 SDNode *SelectLIBM(SDNode *N);
158
159// Include the pieces autogenerated from the target description.
160#include "AArch64GenDAGISel.inc"
161
162private:
163 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
164 SDValue &Shift);
165 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
166 SDValue &OffImm);
167 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
168 SDValue &OffImm);
169 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
170 SDValue &Offset, SDValue &SignExtend,
171 SDValue &DoShift);
172 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
173 SDValue &Offset, SDValue &SignExtend,
174 SDValue &DoShift);
175 bool isWorthFolding(SDValue V) const;
176 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
177 SDValue &Offset, SDValue &SignExtend);
178
179 template<unsigned RegWidth>
180 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
181 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
182 }
183
184 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
185};
186} // end anonymous namespace
187
188/// isIntImmediate - This method tests to see if the node is a constant
189/// operand. If so Imm will receive the 32-bit value.
190static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
191 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
192 Imm = C->getZExtValue();
193 return true;
194 }
195 return false;
196}
197
198// isIntImmediate - This method tests to see if a constant operand.
199// If so Imm will receive the value.
200static bool isIntImmediate(SDValue N, uint64_t &Imm) {
201 return isIntImmediate(N.getNode(), Imm);
202}
203
204// isOpcWithIntImmediate - This method tests to see if the node is a specific
205// opcode and that it has a immediate integer right operand.
206// If so Imm will receive the 32 bit value.
207static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
208 uint64_t &Imm) {
209 return N->getOpcode() == Opc &&
210 isIntImmediate(N->getOperand(1).getNode(), Imm);
211}
212
213bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
214 const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
215 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
216 // Require the address to be in a register. That is safe for all AArch64
217 // variants and it is hard to do anything much smarter without knowing
218 // how the operand is used.
219 OutOps.push_back(Op);
220 return false;
221}
222
223/// SelectArithImmed - Select an immediate value that can be represented as
224/// a 12-bit value shifted left by either 0 or 12. If so, return true with
225/// Val set to the 12-bit value and Shift set to the shifter operand.
226bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
227 SDValue &Shift) {
228 // This function is called from the addsub_shifted_imm ComplexPattern,
229 // which lists [imm] as the list of opcode it's interested in, however
230 // we still need to check whether the operand is actually an immediate
231 // here because the ComplexPattern opcode list is only used in
232 // root-level opcode matching.
233 if (!isa<ConstantSDNode>(N.getNode()))
234 return false;
235
236 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
237 unsigned ShiftAmt;
238
239 if (Immed >> 12 == 0) {
240 ShiftAmt = 0;
241 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
242 ShiftAmt = 12;
243 Immed = Immed >> 12;
244 } else
245 return false;
246
247 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
248 Val = CurDAG->getTargetConstant(Immed, MVT::i32);
249 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
250 return true;
251}
252
253/// SelectNegArithImmed - As above, but negates the value before trying to
254/// select it.
255bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
256 SDValue &Shift) {
257 // This function is called from the addsub_shifted_imm ComplexPattern,
258 // which lists [imm] as the list of opcode it's interested in, however
259 // we still need to check whether the operand is actually an immediate
260 // here because the ComplexPattern opcode list is only used in
261 // root-level opcode matching.
262 if (!isa<ConstantSDNode>(N.getNode()))
263 return false;
264
265 // The immediate operand must be a 24-bit zero-extended immediate.
266 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
267
268 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
269 // have the opposite effect on the C flag, so this pattern mustn't match under
270 // those circumstances.
271 if (Immed == 0)
272 return false;
273
274 if (N.getValueType() == MVT::i32)
275 Immed = ~((uint32_t)Immed) + 1;
276 else
277 Immed = ~Immed + 1ULL;
278 if (Immed & 0xFFFFFFFFFF000000ULL)
279 return false;
280
281 Immed &= 0xFFFFFFULL;
282 return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
283}
284
285/// getShiftTypeForNode - Translate a shift node to the corresponding
286/// ShiftType value.
287static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
288 switch (N.getOpcode()) {
289 default:
290 return AArch64_AM::InvalidShiftExtend;
291 case ISD::SHL:
292 return AArch64_AM::LSL;
293 case ISD::SRL:
294 return AArch64_AM::LSR;
295 case ISD::SRA:
296 return AArch64_AM::ASR;
297 case ISD::ROTR:
298 return AArch64_AM::ROR;
299 }
300}
301
302/// \brief Determine wether it is worth to fold V into an extended register.
303bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
Robin Morisset039781e2014-08-29 21:53:01 +0000304 // it hurts if the value is used at least twice, unless we are optimizing
Tim Northover3b0846e2014-05-24 12:50:23 +0000305 // for code size.
306 if (ForCodeSize || V.hasOneUse())
307 return true;
308 return false;
309}
310
311/// SelectShiftedRegister - Select a "shifted register" operand. If the value
312/// is not shifted, set the Shift operand to default of "LSL 0". The logical
313/// instructions allow the shifted register to be rotated, but the arithmetic
314/// instructions do not. The AllowROR parameter specifies whether ROR is
315/// supported.
316bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
317 SDValue &Reg, SDValue &Shift) {
318 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
319 if (ShType == AArch64_AM::InvalidShiftExtend)
320 return false;
321 if (!AllowROR && ShType == AArch64_AM::ROR)
322 return false;
323
324 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
325 unsigned BitSize = N.getValueType().getSizeInBits();
326 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
327 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
328
329 Reg = N.getOperand(0);
330 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
331 return isWorthFolding(N);
332 }
333
334 return false;
335}
336
337/// getExtendTypeForNode - Translate an extend node to the corresponding
338/// ExtendType value.
339static AArch64_AM::ShiftExtendType
340getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
341 if (N.getOpcode() == ISD::SIGN_EXTEND ||
342 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
343 EVT SrcVT;
344 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
345 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
346 else
347 SrcVT = N.getOperand(0).getValueType();
348
349 if (!IsLoadStore && SrcVT == MVT::i8)
350 return AArch64_AM::SXTB;
351 else if (!IsLoadStore && SrcVT == MVT::i16)
352 return AArch64_AM::SXTH;
353 else if (SrcVT == MVT::i32)
354 return AArch64_AM::SXTW;
355 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
356
357 return AArch64_AM::InvalidShiftExtend;
358 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
359 N.getOpcode() == ISD::ANY_EXTEND) {
360 EVT SrcVT = N.getOperand(0).getValueType();
361 if (!IsLoadStore && SrcVT == MVT::i8)
362 return AArch64_AM::UXTB;
363 else if (!IsLoadStore && SrcVT == MVT::i16)
364 return AArch64_AM::UXTH;
365 else if (SrcVT == MVT::i32)
366 return AArch64_AM::UXTW;
367 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
368
369 return AArch64_AM::InvalidShiftExtend;
370 } else if (N.getOpcode() == ISD::AND) {
371 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
372 if (!CSD)
373 return AArch64_AM::InvalidShiftExtend;
374 uint64_t AndMask = CSD->getZExtValue();
375
376 switch (AndMask) {
377 default:
378 return AArch64_AM::InvalidShiftExtend;
379 case 0xFF:
380 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
381 case 0xFFFF:
382 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
383 case 0xFFFFFFFF:
384 return AArch64_AM::UXTW;
385 }
386 }
387
388 return AArch64_AM::InvalidShiftExtend;
389}
390
391// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
392static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
393 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
394 DL->getOpcode() != AArch64ISD::DUPLANE32)
395 return false;
396
397 SDValue SV = DL->getOperand(0);
398 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
399 return false;
400
401 SDValue EV = SV.getOperand(1);
402 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
403 return false;
404
405 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
406 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
407 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
408 LaneOp = EV.getOperand(0);
409
410 return true;
411}
412
413// Helper for SelectOpcV64LaneV128 - Recogzine operatinos where one operand is a
414// high lane extract.
415static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
416 SDValue &LaneOp, int &LaneIdx) {
417
418 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
419 std::swap(Op0, Op1);
420 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
421 return false;
422 }
423 StdOp = Op1;
424 return true;
425}
426
427/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
428/// is a lane in the upper half of a 128-bit vector. Recognize and select this
429/// so that we don't emit unnecessary lane extracts.
430SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
431 SDValue Op0 = N->getOperand(0);
432 SDValue Op1 = N->getOperand(1);
433 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
434 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
435 int LaneIdx = -1; // Will hold the lane index.
436
437 if (Op1.getOpcode() != ISD::MUL ||
438 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
439 LaneIdx)) {
440 std::swap(Op0, Op1);
441 if (Op1.getOpcode() != ISD::MUL ||
442 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
443 LaneIdx))
444 return nullptr;
445 }
446
447 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
448
449 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
450
451 unsigned MLAOpc = ~0U;
452
453 switch (N->getSimpleValueType(0).SimpleTy) {
454 default:
455 llvm_unreachable("Unrecognized MLA.");
456 case MVT::v4i16:
457 MLAOpc = AArch64::MLAv4i16_indexed;
458 break;
459 case MVT::v8i16:
460 MLAOpc = AArch64::MLAv8i16_indexed;
461 break;
462 case MVT::v2i32:
463 MLAOpc = AArch64::MLAv2i32_indexed;
464 break;
465 case MVT::v4i32:
466 MLAOpc = AArch64::MLAv4i32_indexed;
467 break;
468 }
469
470 return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
471}
472
473SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
474 SDValue SMULLOp0;
475 SDValue SMULLOp1;
476 int LaneIdx;
477
478 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
479 LaneIdx))
480 return nullptr;
481
482 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
483
484 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
485
486 unsigned SMULLOpc = ~0U;
487
488 if (IntNo == Intrinsic::aarch64_neon_smull) {
489 switch (N->getSimpleValueType(0).SimpleTy) {
490 default:
491 llvm_unreachable("Unrecognized SMULL.");
492 case MVT::v4i32:
493 SMULLOpc = AArch64::SMULLv4i16_indexed;
494 break;
495 case MVT::v2i64:
496 SMULLOpc = AArch64::SMULLv2i32_indexed;
497 break;
498 }
499 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
500 switch (N->getSimpleValueType(0).SimpleTy) {
501 default:
502 llvm_unreachable("Unrecognized SMULL.");
503 case MVT::v4i32:
504 SMULLOpc = AArch64::UMULLv4i16_indexed;
505 break;
506 case MVT::v2i64:
507 SMULLOpc = AArch64::UMULLv2i32_indexed;
508 break;
509 }
510 } else
511 llvm_unreachable("Unrecognized intrinsic.");
512
513 return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
514}
515
516/// Instructions that accept extend modifiers like UXTW expect the register
517/// being extended to be a GPR32, but the incoming DAG might be acting on a
518/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
519/// this is the case.
520static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
521 if (N.getValueType() == MVT::i32)
522 return N;
523
524 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
525 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
526 SDLoc(N), MVT::i32, N, SubReg);
527 return SDValue(Node, 0);
528}
529
530
531/// SelectArithExtendedRegister - Select a "extended register" operand. This
532/// operand folds in an extend followed by an optional left shift.
533bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
534 SDValue &Shift) {
535 unsigned ShiftVal = 0;
536 AArch64_AM::ShiftExtendType Ext;
537
538 if (N.getOpcode() == ISD::SHL) {
539 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
540 if (!CSD)
541 return false;
542 ShiftVal = CSD->getZExtValue();
543 if (ShiftVal > 4)
544 return false;
545
546 Ext = getExtendTypeForNode(N.getOperand(0));
547 if (Ext == AArch64_AM::InvalidShiftExtend)
548 return false;
549
550 Reg = N.getOperand(0).getOperand(0);
551 } else {
552 Ext = getExtendTypeForNode(N);
553 if (Ext == AArch64_AM::InvalidShiftExtend)
554 return false;
555
556 Reg = N.getOperand(0);
557 }
558
559 // AArch64 mandates that the RHS of the operation must use the smallest
560 // register classs that could contain the size being extended from. Thus,
561 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
562 // there might not be an actual 32-bit value in the program. We can
563 // (harmlessly) synthesize one by injected an EXTRACT_SUBREG here.
564 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
565 Reg = narrowIfNeeded(CurDAG, Reg);
566 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
567 return isWorthFolding(N);
568}
569
Tim Northoverec7ebeb2014-12-02 23:13:39 +0000570/// If there's a use of this ADDlow that's not itself a load/store then we'll
571/// need to create a real ADD instruction from it anyway and there's no point in
572/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
573/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
574/// leads to duplaicated ADRP instructions.
575static bool isWorthFoldingADDlow(SDValue N) {
576 for (auto Use : N->uses()) {
577 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
578 Use->getOpcode() != ISD::ATOMIC_LOAD &&
579 Use->getOpcode() != ISD::ATOMIC_STORE)
580 return false;
581
582 // ldar and stlr have much more restrictive addressing modes (just a
583 // register).
584 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
585 return false;
586 }
587
588 return true;
589}
590
Tim Northover3b0846e2014-05-24 12:50:23 +0000591/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
592/// immediate" address. The "Size" argument is the size in bytes of the memory
593/// reference, which determines the scale.
594bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
595 SDValue &Base, SDValue &OffImm) {
596 const TargetLowering *TLI = getTargetLowering();
597 if (N.getOpcode() == ISD::FrameIndex) {
598 int FI = cast<FrameIndexSDNode>(N)->getIndex();
599 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
600 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
601 return true;
602 }
603
Tim Northoverec7ebeb2014-12-02 23:13:39 +0000604 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
Tim Northover3b0846e2014-05-24 12:50:23 +0000605 GlobalAddressSDNode *GAN =
606 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
607 Base = N.getOperand(0);
608 OffImm = N.getOperand(1);
609 if (!GAN)
610 return true;
611
612 const GlobalValue *GV = GAN->getGlobal();
613 unsigned Alignment = GV->getAlignment();
614 const DataLayout *DL = TLI->getDataLayout();
Chad Rosier304fe3f2014-06-30 15:03:00 +0000615 Type *Ty = GV->getType()->getElementType();
Tim Northover4a8ac262014-12-02 23:53:43 +0000616 if (Alignment == 0 && Ty->isSized())
Chad Rosier304fe3f2014-06-30 15:03:00 +0000617 Alignment = DL->getABITypeAlignment(Ty);
Tim Northover3b0846e2014-05-24 12:50:23 +0000618
619 if (Alignment >= Size)
620 return true;
621 }
622
623 if (CurDAG->isBaseWithConstantOffset(N)) {
624 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
625 int64_t RHSC = (int64_t)RHS->getZExtValue();
626 unsigned Scale = Log2_32(Size);
627 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
628 Base = N.getOperand(0);
629 if (Base.getOpcode() == ISD::FrameIndex) {
630 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
631 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
632 }
633 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
634 return true;
635 }
636 }
637 }
638
639 // Before falling back to our general case, check if the unscaled
640 // instructions can handle this. If so, that's preferable.
641 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
642 return false;
643
644 // Base only. The address will be materialized into a register before
645 // the memory is accessed.
646 // add x0, Xbase, #offset
647 // ldr x0, [x0]
648 Base = N;
649 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
650 return true;
651}
652
653/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
654/// immediate" address. This should only match when there is an offset that
655/// is not valid for a scaled immediate addressing mode. The "Size" argument
656/// is the size in bytes of the memory reference, which is needed here to know
657/// what is valid for a scaled immediate.
658bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
659 SDValue &Base,
660 SDValue &OffImm) {
661 if (!CurDAG->isBaseWithConstantOffset(N))
662 return false;
663 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
664 int64_t RHSC = RHS->getSExtValue();
665 // If the offset is valid as a scaled immediate, don't match here.
666 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
667 RHSC < (0x1000 << Log2_32(Size)))
668 return false;
669 if (RHSC >= -256 && RHSC < 256) {
670 Base = N.getOperand(0);
671 if (Base.getOpcode() == ISD::FrameIndex) {
672 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
673 const TargetLowering *TLI = getTargetLowering();
674 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
675 }
676 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
677 return true;
678 }
679 }
680 return false;
681}
682
683static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
684 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
685 SDValue ImpDef = SDValue(
686 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
687 0);
688 MachineSDNode *Node = CurDAG->getMachineNode(
689 TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
690 return SDValue(Node, 0);
691}
692
693/// \brief Check if the given SHL node (\p N), can be used to form an
694/// extended register for an addressing mode.
695bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
696 bool WantExtend, SDValue &Offset,
697 SDValue &SignExtend) {
698 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
699 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
700 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
701 return false;
702
703 if (WantExtend) {
704 AArch64_AM::ShiftExtendType Ext =
705 getExtendTypeForNode(N.getOperand(0), true);
706 if (Ext == AArch64_AM::InvalidShiftExtend)
707 return false;
708
709 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
710 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
711 } else {
712 Offset = N.getOperand(0);
713 SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
714 }
715
716 unsigned LegalShiftVal = Log2_32(Size);
717 unsigned ShiftVal = CSD->getZExtValue();
718
719 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
720 return false;
721
722 if (isWorthFolding(N))
723 return true;
724
725 return false;
726}
727
728bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
729 SDValue &Base, SDValue &Offset,
730 SDValue &SignExtend,
731 SDValue &DoShift) {
732 if (N.getOpcode() != ISD::ADD)
733 return false;
734 SDValue LHS = N.getOperand(0);
735 SDValue RHS = N.getOperand(1);
736
737 // We don't want to match immediate adds here, because they are better lowered
738 // to the register-immediate addressing modes.
739 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
740 return false;
741
742 // Check if this particular node is reused in any non-memory related
743 // operation. If yes, do not try to fold this node into the address
744 // computation, since the computation will be kept.
745 const SDNode *Node = N.getNode();
746 for (SDNode *UI : Node->uses()) {
747 if (!isa<MemSDNode>(*UI))
748 return false;
749 }
750
751 // Remember if it is worth folding N when it produces extended register.
752 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
753
754 // Try to match a shifted extend on the RHS.
755 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
756 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
757 Base = LHS;
758 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
759 return true;
760 }
761
762 // Try to match a shifted extend on the LHS.
763 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
764 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
765 Base = RHS;
766 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
767 return true;
768 }
769
770 // There was no shift, whatever else we find.
771 DoShift = CurDAG->getTargetConstant(false, MVT::i32);
772
773 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
774 // Try to match an unshifted extend on the LHS.
775 if (IsExtendedRegisterWorthFolding &&
776 (Ext = getExtendTypeForNode(LHS, true)) !=
777 AArch64_AM::InvalidShiftExtend) {
778 Base = RHS;
779 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
780 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
781 if (isWorthFolding(LHS))
782 return true;
783 }
784
785 // Try to match an unshifted extend on the RHS.
786 if (IsExtendedRegisterWorthFolding &&
787 (Ext = getExtendTypeForNode(RHS, true)) !=
788 AArch64_AM::InvalidShiftExtend) {
789 Base = LHS;
790 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
791 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
792 if (isWorthFolding(RHS))
793 return true;
794 }
795
796 return false;
797}
798
Hao Liu3cb826c2014-10-14 06:50:36 +0000799// Check if the given immediate is preferred by ADD. If an immediate can be
800// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be
801// encoded by one MOVZ, return true.
802static bool isPreferredADD(int64_t ImmOff) {
803 // Constant in [0x0, 0xfff] can be encoded in ADD.
804 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
805 return true;
806 // Check if it can be encoded in an "ADD LSL #12".
807 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
808 // As a single MOVZ is faster than a "ADD of LSL #12", ignore such constant.
809 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
810 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
811 return false;
812}
813
Tim Northover3b0846e2014-05-24 12:50:23 +0000814bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
815 SDValue &Base, SDValue &Offset,
816 SDValue &SignExtend,
817 SDValue &DoShift) {
818 if (N.getOpcode() != ISD::ADD)
819 return false;
820 SDValue LHS = N.getOperand(0);
821 SDValue RHS = N.getOperand(1);
822
Tim Northover3b0846e2014-05-24 12:50:23 +0000823 // Check if this particular node is reused in any non-memory related
824 // operation. If yes, do not try to fold this node into the address
825 // computation, since the computation will be kept.
826 const SDNode *Node = N.getNode();
827 for (SDNode *UI : Node->uses()) {
828 if (!isa<MemSDNode>(*UI))
829 return false;
830 }
831
Hao Liu3cb826c2014-10-14 06:50:36 +0000832 // Watch out if RHS is a wide immediate, it can not be selected into
833 // [BaseReg+Imm] addressing mode. Also it may not be able to be encoded into
834 // ADD/SUB. Instead it will use [BaseReg + 0] address mode and generate
835 // instructions like:
836 // MOV X0, WideImmediate
837 // ADD X1, BaseReg, X0
838 // LDR X2, [X1, 0]
839 // For such situation, using [BaseReg, XReg] addressing mode can save one
840 // ADD/SUB:
841 // MOV X0, WideImmediate
842 // LDR X2, [BaseReg, X0]
843 if (isa<ConstantSDNode>(RHS)) {
844 int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
845 unsigned Scale = Log2_32(Size);
846 // Skip the immediate can be seleced by load/store addressing mode.
847 // Also skip the immediate can be encoded by a single ADD (SUB is also
848 // checked by using -ImmOff).
849 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
850 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
851 return false;
852
853 SDLoc DL(N.getNode());
854 SDValue Ops[] = { RHS };
855 SDNode *MOVI =
856 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
857 SDValue MOVIV = SDValue(MOVI, 0);
858 // This ADD of two X register will be selected into [Reg+Reg] mode.
859 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
860 }
861
Tim Northover3b0846e2014-05-24 12:50:23 +0000862 // Remember if it is worth folding N when it produces extended register.
863 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
864
865 // Try to match a shifted extend on the RHS.
866 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
867 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
868 Base = LHS;
869 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
870 return true;
871 }
872
873 // Try to match a shifted extend on the LHS.
874 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
875 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
876 Base = RHS;
877 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
878 return true;
879 }
880
881 // Match any non-shifted, non-extend, non-immediate add expression.
882 Base = LHS;
883 Offset = RHS;
884 SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
885 DoShift = CurDAG->getTargetConstant(false, MVT::i32);
886 // Reg1 + Reg2 is free: no check needed.
887 return true;
888}
889
890SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
Benjamin Kramerea68a942015-02-19 15:26:17 +0000891 static const unsigned RegClassIDs[] = {
Tim Northover3b0846e2014-05-24 12:50:23 +0000892 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
Benjamin Kramerea68a942015-02-19 15:26:17 +0000893 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
894 AArch64::dsub2, AArch64::dsub3};
Tim Northover3b0846e2014-05-24 12:50:23 +0000895
896 return createTuple(Regs, RegClassIDs, SubRegs);
897}
898
899SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
Benjamin Kramerea68a942015-02-19 15:26:17 +0000900 static const unsigned RegClassIDs[] = {
Tim Northover3b0846e2014-05-24 12:50:23 +0000901 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
Benjamin Kramerea68a942015-02-19 15:26:17 +0000902 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
903 AArch64::qsub2, AArch64::qsub3};
Tim Northover3b0846e2014-05-24 12:50:23 +0000904
905 return createTuple(Regs, RegClassIDs, SubRegs);
906}
907
908SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
Benjamin Kramerea68a942015-02-19 15:26:17 +0000909 const unsigned RegClassIDs[],
910 const unsigned SubRegs[]) {
Tim Northover3b0846e2014-05-24 12:50:23 +0000911 // There's no special register-class for a vector-list of 1 element: it's just
912 // a vector.
913 if (Regs.size() == 1)
914 return Regs[0];
915
916 assert(Regs.size() >= 2 && Regs.size() <= 4);
917
918 SDLoc DL(Regs[0].getNode());
919
920 SmallVector<SDValue, 4> Ops;
921
922 // First operand of REG_SEQUENCE is the desired RegClass.
923 Ops.push_back(
924 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
925
926 // Then we get pairs of source & subregister-position for the components.
927 for (unsigned i = 0; i < Regs.size(); ++i) {
928 Ops.push_back(Regs[i]);
929 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
930 }
931
932 SDNode *N =
933 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
934 return SDValue(N, 0);
935}
936
937SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
938 unsigned Opc, bool isExt) {
939 SDLoc dl(N);
940 EVT VT = N->getValueType(0);
941
942 unsigned ExtOff = isExt;
943
944 // Form a REG_SEQUENCE to force register allocation.
945 unsigned Vec0Off = ExtOff + 1;
946 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
947 N->op_begin() + Vec0Off + NumVecs);
948 SDValue RegSeq = createQTuple(Regs);
949
950 SmallVector<SDValue, 6> Ops;
951 if (isExt)
952 Ops.push_back(N->getOperand(1));
953 Ops.push_back(RegSeq);
954 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
955 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
956}
957
958SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
959 LoadSDNode *LD = cast<LoadSDNode>(N);
960 if (LD->isUnindexed())
961 return nullptr;
962 EVT VT = LD->getMemoryVT();
963 EVT DstVT = N->getValueType(0);
964 ISD::MemIndexedMode AM = LD->getAddressingMode();
965 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
966
967 // We're not doing validity checking here. That was done when checking
968 // if we should mark the load as indexed or not. We're just selecting
969 // the right instruction.
970 unsigned Opcode = 0;
971
972 ISD::LoadExtType ExtType = LD->getExtensionType();
973 bool InsertTo64 = false;
974 if (VT == MVT::i64)
975 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
976 else if (VT == MVT::i32) {
977 if (ExtType == ISD::NON_EXTLOAD)
978 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
979 else if (ExtType == ISD::SEXTLOAD)
980 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
981 else {
982 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
983 InsertTo64 = true;
984 // The result of the load is only i32. It's the subreg_to_reg that makes
985 // it into an i64.
986 DstVT = MVT::i32;
987 }
988 } else if (VT == MVT::i16) {
989 if (ExtType == ISD::SEXTLOAD) {
990 if (DstVT == MVT::i64)
991 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
992 else
993 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
994 } else {
995 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
996 InsertTo64 = DstVT == MVT::i64;
997 // The result of the load is only i32. It's the subreg_to_reg that makes
998 // it into an i64.
999 DstVT = MVT::i32;
1000 }
1001 } else if (VT == MVT::i8) {
1002 if (ExtType == ISD::SEXTLOAD) {
1003 if (DstVT == MVT::i64)
1004 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1005 else
1006 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1007 } else {
1008 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1009 InsertTo64 = DstVT == MVT::i64;
1010 // The result of the load is only i32. It's the subreg_to_reg that makes
1011 // it into an i64.
1012 DstVT = MVT::i32;
1013 }
1014 } else if (VT == MVT::f32) {
1015 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1016 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1017 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1018 } else if (VT.is128BitVector()) {
1019 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1020 } else
1021 return nullptr;
1022 SDValue Chain = LD->getChain();
1023 SDValue Base = LD->getBasePtr();
1024 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1025 int OffsetVal = (int)OffsetOp->getZExtValue();
1026 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
1027 SDValue Ops[] = { Base, Offset, Chain };
1028 SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
1029 MVT::Other, Ops);
1030 // Either way, we're replacing the node, so tell the caller that.
1031 Done = true;
1032 SDValue LoadedVal = SDValue(Res, 1);
1033 if (InsertTo64) {
1034 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
1035 LoadedVal =
1036 SDValue(CurDAG->getMachineNode(
1037 AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
1038 CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
1039 0);
1040 }
1041
1042 ReplaceUses(SDValue(N, 0), LoadedVal);
1043 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1044 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1045
1046 return nullptr;
1047}
1048
1049SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
1050 unsigned Opc, unsigned SubRegIdx) {
1051 SDLoc dl(N);
1052 EVT VT = N->getValueType(0);
1053 SDValue Chain = N->getOperand(0);
1054
Benjamin Kramerea68a942015-02-19 15:26:17 +00001055 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1056 Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00001057
Benjamin Kramerea68a942015-02-19 15:26:17 +00001058 EVT ResTys[] = {MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001059
1060 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1061 SDValue SuperReg = SDValue(Ld, 0);
1062 for (unsigned i = 0; i < NumVecs; ++i)
1063 ReplaceUses(SDValue(N, i),
1064 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1065
1066 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1067 return nullptr;
1068}
1069
1070SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1071 unsigned Opc, unsigned SubRegIdx) {
1072 SDLoc dl(N);
1073 EVT VT = N->getValueType(0);
1074 SDValue Chain = N->getOperand(0);
1075
Benjamin Kramerea68a942015-02-19 15:26:17 +00001076 SDValue Ops[] = {N->getOperand(1), // Mem operand
1077 N->getOperand(2), // Incremental
1078 Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00001079
Benjamin Kramerea68a942015-02-19 15:26:17 +00001080 EVT ResTys[] = {MVT::i64, // Type of the write back register
1081 MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001082
1083 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1084
1085 // Update uses of write back register
1086 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1087
1088 // Update uses of vector list
1089 SDValue SuperReg = SDValue(Ld, 1);
1090 if (NumVecs == 1)
1091 ReplaceUses(SDValue(N, 0), SuperReg);
1092 else
1093 for (unsigned i = 0; i < NumVecs; ++i)
1094 ReplaceUses(SDValue(N, i),
1095 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1096
1097 // Update the chain
1098 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1099 return nullptr;
1100}
1101
1102SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1103 unsigned Opc) {
1104 SDLoc dl(N);
1105 EVT VT = N->getOperand(2)->getValueType(0);
1106
1107 // Form a REG_SEQUENCE to force register allocation.
1108 bool Is128Bit = VT.getSizeInBits() == 128;
1109 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1110 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1111
Benjamin Kramerea68a942015-02-19 15:26:17 +00001112 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001113 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1114
1115 return St;
1116}
1117
1118SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1119 unsigned Opc) {
1120 SDLoc dl(N);
1121 EVT VT = N->getOperand(2)->getValueType(0);
Benjamin Kramerea68a942015-02-19 15:26:17 +00001122 EVT ResTys[] = {MVT::i64, // Type of the write back register
1123 MVT::Other}; // Type for the Chain
Tim Northover3b0846e2014-05-24 12:50:23 +00001124
1125 // Form a REG_SEQUENCE to force register allocation.
1126 bool Is128Bit = VT.getSizeInBits() == 128;
1127 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1128 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1129
Benjamin Kramerea68a942015-02-19 15:26:17 +00001130 SDValue Ops[] = {RegSeq,
1131 N->getOperand(NumVecs + 1), // base register
1132 N->getOperand(NumVecs + 2), // Incremental
1133 N->getOperand(0)}; // Chain
Tim Northover3b0846e2014-05-24 12:50:23 +00001134 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1135
1136 return St;
1137}
1138
1139/// WidenVector - Given a value in the V64 register class, produce the
1140/// equivalent value in the V128 register class.
1141class WidenVector {
1142 SelectionDAG &DAG;
1143
1144public:
1145 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1146
1147 SDValue operator()(SDValue V64Reg) {
1148 EVT VT = V64Reg.getValueType();
1149 unsigned NarrowSize = VT.getVectorNumElements();
1150 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1151 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1152 SDLoc DL(V64Reg);
1153
1154 SDValue Undef =
1155 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1156 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1157 }
1158};
1159
1160/// NarrowVector - Given a value in the V128 register class, produce the
1161/// equivalent value in the V64 register class.
1162static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1163 EVT VT = V128Reg.getValueType();
1164 unsigned WideSize = VT.getVectorNumElements();
1165 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1166 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1167
1168 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1169 V128Reg);
1170}
1171
1172SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1173 unsigned Opc) {
1174 SDLoc dl(N);
1175 EVT VT = N->getValueType(0);
1176 bool Narrow = VT.getSizeInBits() == 64;
1177
1178 // Form a REG_SEQUENCE to force register allocation.
1179 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1180
1181 if (Narrow)
1182 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1183 WidenVector(*CurDAG));
1184
1185 SDValue RegSeq = createQTuple(Regs);
1186
Benjamin Kramerea68a942015-02-19 15:26:17 +00001187 EVT ResTys[] = {MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001188
1189 unsigned LaneNo =
1190 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1191
Benjamin Kramerea68a942015-02-19 15:26:17 +00001192 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
1193 N->getOperand(NumVecs + 3), N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001194 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1195 SDValue SuperReg = SDValue(Ld, 0);
1196
1197 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1198 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1199 AArch64::qsub3 };
1200 for (unsigned i = 0; i < NumVecs; ++i) {
1201 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1202 if (Narrow)
1203 NV = NarrowVector(NV, *CurDAG);
1204 ReplaceUses(SDValue(N, i), NV);
1205 }
1206
1207 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1208
1209 return Ld;
1210}
1211
1212SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1213 unsigned Opc) {
1214 SDLoc dl(N);
1215 EVT VT = N->getValueType(0);
1216 bool Narrow = VT.getSizeInBits() == 64;
1217
1218 // Form a REG_SEQUENCE to force register allocation.
1219 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1220
1221 if (Narrow)
1222 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1223 WidenVector(*CurDAG));
1224
1225 SDValue RegSeq = createQTuple(Regs);
1226
Benjamin Kramerea68a942015-02-19 15:26:17 +00001227 EVT ResTys[] = {MVT::i64, // Type of the write back register
1228 MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001229
1230 unsigned LaneNo =
1231 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1232
Benjamin Kramerea68a942015-02-19 15:26:17 +00001233 SDValue Ops[] = {RegSeq,
1234 CurDAG->getTargetConstant(LaneNo, MVT::i64), // Lane Number
1235 N->getOperand(NumVecs + 2), // Base register
1236 N->getOperand(NumVecs + 3), // Incremental
1237 N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001238 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1239
1240 // Update uses of the write back register
1241 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1242
1243 // Update uses of the vector list
1244 SDValue SuperReg = SDValue(Ld, 1);
1245 if (NumVecs == 1) {
1246 ReplaceUses(SDValue(N, 0),
1247 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1248 } else {
1249 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1250 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1251 AArch64::qsub3 };
1252 for (unsigned i = 0; i < NumVecs; ++i) {
1253 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1254 SuperReg);
1255 if (Narrow)
1256 NV = NarrowVector(NV, *CurDAG);
1257 ReplaceUses(SDValue(N, i), NV);
1258 }
1259 }
1260
1261 // Update the Chain
1262 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1263
1264 return Ld;
1265}
1266
1267SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1268 unsigned Opc) {
1269 SDLoc dl(N);
1270 EVT VT = N->getOperand(2)->getValueType(0);
1271 bool Narrow = VT.getSizeInBits() == 64;
1272
1273 // Form a REG_SEQUENCE to force register allocation.
1274 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1275
1276 if (Narrow)
1277 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1278 WidenVector(*CurDAG));
1279
1280 SDValue RegSeq = createQTuple(Regs);
1281
1282 unsigned LaneNo =
1283 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1284
Benjamin Kramerea68a942015-02-19 15:26:17 +00001285 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
1286 N->getOperand(NumVecs + 3), N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001287 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1288
1289 // Transfer memoperands.
1290 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1291 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1292 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1293
1294 return St;
1295}
1296
1297SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1298 unsigned Opc) {
1299 SDLoc dl(N);
1300 EVT VT = N->getOperand(2)->getValueType(0);
1301 bool Narrow = VT.getSizeInBits() == 64;
1302
1303 // Form a REG_SEQUENCE to force register allocation.
1304 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1305
1306 if (Narrow)
1307 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1308 WidenVector(*CurDAG));
1309
1310 SDValue RegSeq = createQTuple(Regs);
1311
Benjamin Kramerea68a942015-02-19 15:26:17 +00001312 EVT ResTys[] = {MVT::i64, // Type of the write back register
1313 MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001314
1315 unsigned LaneNo =
1316 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1317
Benjamin Kramerea68a942015-02-19 15:26:17 +00001318 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
1319 N->getOperand(NumVecs + 2), // Base Register
1320 N->getOperand(NumVecs + 3), // Incremental
1321 N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001322 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1323
1324 // Transfer memoperands.
1325 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1326 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1327 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1328
1329 return St;
1330}
1331
1332static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1333 unsigned &Opc, SDValue &Opd0,
1334 unsigned &LSB, unsigned &MSB,
1335 unsigned NumberOfIgnoredLowBits,
1336 bool BiggerPattern) {
1337 assert(N->getOpcode() == ISD::AND &&
1338 "N must be a AND operation to call this function");
1339
1340 EVT VT = N->getValueType(0);
1341
1342 // Here we can test the type of VT and return false when the type does not
1343 // match, but since it is done prior to that call in the current context
1344 // we turned that into an assert to avoid redundant code.
1345 assert((VT == MVT::i32 || VT == MVT::i64) &&
1346 "Type checking must have been done before calling this function");
1347
1348 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1349 // changed the AND node to a 32-bit mask operation. We'll have to
1350 // undo that as part of the transform here if we want to catch all
1351 // the opportunities.
1352 // Currently the NumberOfIgnoredLowBits argument helps to recover
1353 // form these situations when matching bigger pattern (bitfield insert).
1354
1355 // For unsigned extracts, check for a shift right and mask
1356 uint64_t And_imm = 0;
1357 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1358 return false;
1359
1360 const SDNode *Op0 = N->getOperand(0).getNode();
1361
1362 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1363 // simplified. Try to undo that
1364 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1365
1366 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1367 if (And_imm & (And_imm + 1))
1368 return false;
1369
1370 bool ClampMSB = false;
1371 uint64_t Srl_imm = 0;
1372 // Handle the SRL + ANY_EXTEND case.
1373 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1374 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1375 // Extend the incoming operand of the SRL to 64-bit.
1376 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1377 // Make sure to clamp the MSB so that we preserve the semantics of the
1378 // original operations.
1379 ClampMSB = true;
1380 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1381 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1382 Srl_imm)) {
1383 // If the shift result was truncated, we can still combine them.
1384 Opd0 = Op0->getOperand(0).getOperand(0);
1385
1386 // Use the type of SRL node.
1387 VT = Opd0->getValueType(0);
1388 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1389 Opd0 = Op0->getOperand(0);
1390 } else if (BiggerPattern) {
1391 // Let's pretend a 0 shift right has been performed.
1392 // The resulting code will be at least as good as the original one
1393 // plus it may expose more opportunities for bitfield insert pattern.
1394 // FIXME: Currently we limit this to the bigger pattern, because
1395 // some optimizations expect AND and not UBFM
1396 Opd0 = N->getOperand(0);
1397 } else
1398 return false;
1399
1400 assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
1401 "bad amount in shift node!");
1402
1403 LSB = Srl_imm;
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001404 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1405 : countTrailingOnes<uint64_t>(And_imm)) -
Tim Northover3b0846e2014-05-24 12:50:23 +00001406 1;
1407 if (ClampMSB)
1408 // Since we're moving the extend before the right shift operation, we need
1409 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1410 // the zeros which would get shifted in with the original right shift
1411 // operation.
1412 MSB = MSB > 31 ? 31 : MSB;
1413
1414 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1415 return true;
1416}
1417
David Xu052b9d92014-09-02 09:33:56 +00001418static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1419 SDValue &Opd0, unsigned &LSB,
1420 unsigned &MSB) {
1421 // We are looking for the following pattern which basically extracts several
1422 // continuous bits from the source value and places it from the LSB of the
1423 // destination value, all other bits of the destination value or set to zero:
Tim Northover3b0846e2014-05-24 12:50:23 +00001424 //
1425 // Value2 = AND Value, MaskImm
1426 // SRL Value2, ShiftImm
1427 //
David Xu052b9d92014-09-02 09:33:56 +00001428 // with MaskImm >> ShiftImm to search for the bit width.
Tim Northover3b0846e2014-05-24 12:50:23 +00001429 //
1430 // This gets selected into a single UBFM:
1431 //
David Xu052b9d92014-09-02 09:33:56 +00001432 // UBFM Value, ShiftImm, BitWide + Srl_imm -1
Tim Northover3b0846e2014-05-24 12:50:23 +00001433 //
1434
1435 if (N->getOpcode() != ISD::SRL)
1436 return false;
1437
1438 uint64_t And_mask = 0;
1439 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1440 return false;
1441
1442 Opd0 = N->getOperand(0).getOperand(0);
1443
1444 uint64_t Srl_imm = 0;
1445 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1446 return false;
1447
David Xu052b9d92014-09-02 09:33:56 +00001448 // Check whether we really have several bits extract here.
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001449 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
David Xu052b9d92014-09-02 09:33:56 +00001450 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001451 if (N->getValueType(0) == MVT::i32)
1452 Opc = AArch64::UBFMWri;
1453 else
1454 Opc = AArch64::UBFMXri;
1455
David Xu052b9d92014-09-02 09:33:56 +00001456 LSB = Srl_imm;
1457 MSB = BitWide + Srl_imm - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001458 return true;
1459 }
1460
1461 return false;
1462}
1463
1464static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1465 unsigned &LSB, unsigned &MSB,
1466 bool BiggerPattern) {
1467 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1468 "N must be a SHR/SRA operation to call this function");
1469
1470 EVT VT = N->getValueType(0);
1471
1472 // We could test the type of VT here and return false when it does not
1473 // match, but since that check is already done before this call in the
1474 // current context, we turn it into an assert to avoid redundant code.
1475 assert((VT == MVT::i32 || VT == MVT::i64) &&
1476 "Type checking must have been done before calling this function");
1477
1478 // Check for an AND + SRL doing a multi-bit extract.
1479 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
Tim Northover3b0846e2014-05-24 12:50:23 +00001480 return true;
1481
1482 // we're looking for a shift of a shift
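 // As a hypothetical example (illustrative values only): for i32,
 // (srl (shl x, 3), 5) has Shl_imm = 3 and Srl_imm = 5, giving LSB = 2 and
 // MSB = 2 + 26 = 28, i.e. UBFMWri x, #2, #28 (extract bits 2..28 of x).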
1483 uint64_t Shl_imm = 0;
1484 uint64_t Trunc_bits = 0;
1485 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1486 Opd0 = N->getOperand(0).getOperand(0);
1487 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1488 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1489 // We are looking for a shift of a truncate. A truncate from i64 to i32 can
1490 // be considered as setting the high 32 bits to zero. Our strategy here is to
1491 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1492 // find more redundancy later.
1493 Opd0 = N->getOperand(0).getOperand(0);
1494 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1495 VT = Opd0->getValueType(0);
1496 assert(VT == MVT::i64 && "the promoted type should be i64");
1497 } else if (BiggerPattern) {
1498 // Let's pretend a 0 shift left has been performed.
1499 // FIXME: Currently we limit this to the bigger pattern case,
1500 // because some optimizations expect an AND and not a UBFM.
1501 Opd0 = N->getOperand(0);
1502 } else
1503 return false;
1504
1505 assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
1506 uint64_t Srl_imm = 0;
1507 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1508 return false;
1509
1510 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1511 "bad amount in shift node!");
1512 // Note: The width operand is encoded as width-1.
1513 unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
1514 int sLSB = Srl_imm - Shl_imm;
1515 if (sLSB < 0)
1516 return false;
1517 LSB = sLSB;
1518 MSB = LSB + Width;
1519 // SRA requires a signed extraction
1520 if (VT == MVT::i32)
1521 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1522 else
1523 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1524 return true;
1525}
1526
1527static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1528 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1529 unsigned NumberOfIgnoredLowBits = 0,
1530 bool BiggerPattern = false) {
1531 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1532 return false;
1533
1534 switch (N->getOpcode()) {
1535 default:
1536 if (!N->isMachineOpcode())
1537 return false;
1538 break;
1539 case ISD::AND:
1540 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1541 NumberOfIgnoredLowBits, BiggerPattern);
1542 case ISD::SRL:
1543 case ISD::SRA:
1544 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1545 }
1546
1547 unsigned NOpc = N->getMachineOpcode();
1548 switch (NOpc) {
1549 default:
1550 return false;
1551 case AArch64::SBFMWri:
1552 case AArch64::UBFMWri:
1553 case AArch64::SBFMXri:
1554 case AArch64::UBFMXri:
1555 Opc = NOpc;
1556 Opd0 = N->getOperand(0);
1557 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1558 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1559 return true;
1560 }
1561 // Unreachable
1562 return false;
1563}
1564
1565SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1566 unsigned Opc, LSB, MSB;
1567 SDValue Opd0;
1568 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1569 return nullptr;
1570
1571 EVT VT = N->getValueType(0);
1572
1573 // If the bit extract operation is 64bit but the original type is 32bit, we
1574 // need to add one EXTRACT_SUBREG.
1575 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1576 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
1577 CurDAG->getTargetConstant(MSB, MVT::i64)};
1578
1579 SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
1580 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
1581 MachineSDNode *Node =
1582 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
1583 SDValue(BFM, 0), SubReg);
1584 return Node;
1585 }
1586
1587 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
1588 CurDAG->getTargetConstant(MSB, VT)};
1589 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1590}
1591
1592/// Does DstMask form a complementary pair with the mask provided by
1593/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
1594/// this asks whether DstMask zeroes precisely those bits that will be set by
1595/// the other half.
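/// As a hypothetical illustration (values not from a specific caller): for
/// i32, DstMask == 0xffffff00 pairs with inserted bits occupying 0x000000ff,
/// since the two masks are disjoint and together cover all 32 bits.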
1596static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1597 unsigned NumberOfIgnoredHighBits, EVT VT) {
1598 assert((VT == MVT::i32 || VT == MVT::i64) &&
1599 "i32 or i64 mask type expected!");
1600 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1601
1602 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1603 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1604
1605 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1606 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1607}
1608
1609// Look for bits that will be useful for later uses.
1610 // A bit is considered useless as soon as it is dropped and never used
1611 // before it has been dropped.
1612 // E.g., looking for the useful bits of x:
1613// 1. y = x & 0x7
1614// 2. z = y >> 2
1615 // After #1, the useful bits of x are 0x7; these useful bits then live
1616 // through y.
1617 // After #2, the useful bits of x are 0x4.
1618 // However, if x is used by an unpredictable instruction, then all its bits
1619 // are useful.
1620// E.g.
1621// 1. y = x & 0x7
1622// 2. z = y >> 2
1623// 3. str x, [@x]
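// In this second example the store at #3 uses all of x, so despite the
// masking at #1 every bit of x remains useful.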
1624static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1625
1626static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1627 unsigned Depth) {
1628 uint64_t Imm =
1629 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1630 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1631 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1632 getUsefulBits(Op, UsefulBits, Depth + 1);
1633}
1634
1635static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1636 uint64_t Imm, uint64_t MSB,
1637 unsigned Depth) {
1638 // inherit the bitwidth value
1639 APInt OpUsefulBits(UsefulBits);
1640 OpUsefulBits = 1;
1641
1642 if (MSB >= Imm) {
1643 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1644 --OpUsefulBits;
1645 // The interesting part will be in the lower part of the result
1646 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1647 // The interesting part was starting at Imm in the argument
1648 OpUsefulBits = OpUsefulBits.shl(Imm);
1649 } else {
1650 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1651 --OpUsefulBits;
1652 // The interesting part will be shifted in the result
1653 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1654 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1655 // The interesting part was at zero in the argument
1656 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1657 }
1658
1659 UsefulBits &= OpUsefulBits;
1660}
1661
1662static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1663 unsigned Depth) {
1664 uint64_t Imm =
1665 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1666 uint64_t MSB =
1667 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1668
1669 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1670}
1671
1672static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1673 unsigned Depth) {
1674 uint64_t ShiftTypeAndValue =
1675 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1676 APInt Mask(UsefulBits);
1677 Mask.clearAllBits();
1678 Mask.flipAllBits();
1679
1680 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1681 // Shift Left
1682 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1683 Mask = Mask.shl(ShiftAmt);
1684 getUsefulBits(Op, Mask, Depth + 1);
1685 Mask = Mask.lshr(ShiftAmt);
1686 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1687 // Shift Right
1688 // We do not handle AArch64_AM::ASR, because the sign will change the
1689 // number of useful bits
1690 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1691 Mask = Mask.lshr(ShiftAmt);
1692 getUsefulBits(Op, Mask, Depth + 1);
1693 Mask = Mask.shl(ShiftAmt);
1694 } else
1695 return;
1696
1697 UsefulBits &= Mask;
1698}
1699
1700static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1701 unsigned Depth) {
1702 uint64_t Imm =
1703 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1704 uint64_t MSB =
1705 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1706
1707 if (Op.getOperand(1) == Orig)
1708 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1709
1710 APInt OpUsefulBits(UsefulBits);
1711 OpUsefulBits = 1;
1712
1713 if (MSB >= Imm) {
1714 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1715 --OpUsefulBits;
1716 UsefulBits &= ~OpUsefulBits;
1717 getUsefulBits(Op, UsefulBits, Depth + 1);
1718 } else {
1719 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1720 --OpUsefulBits;
1721 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1722 getUsefulBits(Op, UsefulBits, Depth + 1);
1723 }
1724}
1725
1726static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1727 SDValue Orig, unsigned Depth) {
1728
1729 // Users of this node should have already been instruction selected
1730 // FIXME: Can we turn that into an assert?
1731 if (!UserNode->isMachineOpcode())
1732 return;
1733
1734 switch (UserNode->getMachineOpcode()) {
1735 default:
1736 return;
1737 case AArch64::ANDSWri:
1738 case AArch64::ANDSXri:
1739 case AArch64::ANDWri:
1740 case AArch64::ANDXri:
1741 // We increment Depth only when we call getUsefulBits
1742 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1743 Depth);
1744 case AArch64::UBFMWri:
1745 case AArch64::UBFMXri:
1746 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1747
1748 case AArch64::ORRWrs:
1749 case AArch64::ORRXrs:
1750 if (UserNode->getOperand(1) != Orig)
1751 return;
1752 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1753 Depth);
1754 case AArch64::BFMWri:
1755 case AArch64::BFMXri:
1756 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1757 }
1758}
1759
1760static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1761 if (Depth >= 6)
1762 return;
1763 // Initialize UsefulBits
1764 if (!Depth) {
1765 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1766 // At the beginning, assume every produced bit is useful
1767 UsefulBits = APInt(Bitwidth, 0);
1768 UsefulBits.flipAllBits();
1769 }
1770 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1771
1772 for (SDNode *Node : Op.getNode()->uses()) {
1773 // A use cannot produce useful bits
1774 APInt UsefulBitsForUse = APInt(UsefulBits);
1775 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1776 UsersUsefulBits |= UsefulBitsForUse;
1777 }
1778 // UsefulBits contains the produced bits that are meaningful for the
1779 // current definition, thus a user cannot make a bit meaningful at
1780 // this point
1781 UsefulBits &= UsersUsefulBits;
1782}
1783
1784/// Create a machine node performing a notional SHL of Op by ShlAmount. If
1785/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1786/// 0, return Op unchanged.
1787static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1788 if (ShlAmount == 0)
1789 return Op;
1790
1791 EVT VT = Op.getValueType();
1792 unsigned BitWidth = VT.getSizeInBits();
1793 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1794
1795 SDNode *ShiftNode;
1796 if (ShlAmount > 0) {
1797 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1798 ShiftNode = CurDAG->getMachineNode(
1799 UBFMOpc, SDLoc(Op), VT, Op,
1800 CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
1801 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
1802 } else {
1803 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1804 assert(ShlAmount < 0 && "expected right shift");
1805 int ShrAmount = -ShlAmount;
1806 ShiftNode = CurDAG->getMachineNode(
1807 UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
1808 CurDAG->getTargetConstant(BitWidth - 1, VT));
1809 }
1810
1811 return SDValue(ShiftNode, 0);
1812}
1813
1814/// Does this tree qualify as an attempt to move a bitfield into position,
1815/// essentially "(and (shl VAL, N), Mask)".
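/// As a hypothetical example (illustrative values only): for i32, the tree
/// (and (shl x, 3), 0xf8) positions a 5-bit field of x at bit 3, giving
/// ShiftAmount = 3 and MaskWidth = 5.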
1816static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1817 SDValue &Src, int &ShiftAmount,
1818 int &MaskWidth) {
1819 EVT VT = Op.getValueType();
1820 unsigned BitWidth = VT.getSizeInBits();
1821 (void)BitWidth;
1822 assert(BitWidth == 32 || BitWidth == 64);
1823
1824 APInt KnownZero, KnownOne;
1825 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1826
1827 // Non-zero in the sense that they're not provably zero, which is the key
1828 // point if we want to use this value
1829 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1830
1831 // Discard a constant AND mask if present. It's safe because the node will
1832 // already have been factored into the computeKnownBits calculation above.
1833 uint64_t AndImm;
1834 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1835 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1836 Op = Op.getOperand(0);
1837 }
1838
1839 uint64_t ShlImm;
1840 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1841 return false;
1842 Op = Op.getOperand(0);
1843
1844 if (!isShiftedMask_64(NonZeroBits))
1845 return false;
1846
1847 ShiftAmount = countTrailingZeros(NonZeroBits);
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001848 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
Tim Northover3b0846e2014-05-24 12:50:23 +00001849
1850 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1851 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1852 // amount.
1853 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1854
1855 return true;
1856}
1857
1858 // Given an OR operation, check if we have the following pattern
1859 // ubfm c, b, imm, imm2 (or something that does the same job, see
1860// isBitfieldExtractOp)
1861 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1862// countTrailingZeros(mask2) == imm2 - imm + 1
1863// f = d | c
1864 // if yes, the given reference arguments will be updated so that one can replace
1865// the OR instruction with:
1866// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
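//
// As a hypothetical 32-bit instance (values chosen for illustration only):
//   c = UBFM b, 4, 7       ; extract bits 4..7 of b into bits 0..3 of c
//   d = e & 0xfffffff0     ; clear the low 4 bits of e
//   f = d | c
// can be replaced with f = BFM e, b, 4, 7 (a BFXIL form).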
1867static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1868 SDValue &Src, unsigned &ImmR,
1869 unsigned &ImmS, SelectionDAG *CurDAG) {
1870 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
1871
1872 // Set Opc
1873 EVT VT = N->getValueType(0);
1874 if (VT == MVT::i32)
1875 Opc = AArch64::BFMWri;
1876 else if (VT == MVT::i64)
1877 Opc = AArch64::BFMXri;
1878 else
1879 return false;
1880
1881 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1882 // have the expected shape. Try to undo that.
1883 APInt UsefulBits;
1884 getUsefulBits(SDValue(N, 0), UsefulBits);
1885
1886 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1887 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1888
1889 // OR is commutative, check both possibilities (does llvm provide a
1890 // way to do that directly, e.g., via a code matcher?)
1891 SDValue OrOpd1Val = N->getOperand(1);
1892 SDNode *OrOpd0 = N->getOperand(0).getNode();
1893 SDNode *OrOpd1 = N->getOperand(1).getNode();
1894 for (int i = 0; i < 2;
1895 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1896 unsigned BFXOpc;
1897 int DstLSB, Width;
1898 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1899 NumberOfIgnoredLowBits, true)) {
1900 // Check that the returned opcode is compatible with the pattern,
1901 // i.e., same type and zero extended (U and not S)
1902 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
1903 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
1904 continue;
1905
1906 // Compute the width of the bitfield insertion
1907 DstLSB = 0;
1908 Width = ImmS - ImmR + 1;
1909 // FIXME: This constraint is to catch bitfield insertion; we may
1910 // want to widen the pattern if we want to handle the general bitfield
1911 // move case
1912 if (Width <= 0)
1913 continue;
1914
1915 // If the mask on the insertee is correct, we have a BFXIL operation. We
1916 // can share the ImmR and ImmS values from the already-computed UBFM.
1917 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1918 DstLSB, Width)) {
1919 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
1920 ImmS = Width - 1;
1921 } else
1922 continue;
1923
1924 // Check the second part of the pattern
1925 EVT VT = OrOpd1->getValueType(0);
1926 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1927
1928 // Compute the Known Zero for the candidate of the first operand.
1929 // This allows us to catch more general cases than just looking for
1930 // an AND with an immediate. Indeed, simplify-demanded-bits may have removed
1931 // the AND instruction because it proved it was useless.
1932 APInt KnownZero, KnownOne;
1933 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
1934
1935 // Check if there is enough room for the second operand to appear
1936 // in the first one
1937 APInt BitsToBeInserted =
1938 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1939
1940 if ((BitsToBeInserted & ~KnownZero) != 0)
1941 continue;
1942
1943 // Set the first operand
1944 uint64_t Imm;
1945 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1946 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1947 // In that case, we can eliminate the AND
1948 Dst = OrOpd1->getOperand(0);
1949 else
1950 // Maybe the AND has been removed by simplify-demanded-bits
1951 // or is useful because it discards more bits
1952 Dst = OrOpd1Val;
1953
1954 // both parts match
1955 return true;
1956 }
1957
1958 return false;
1959}
1960
1961SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
1962 if (N->getOpcode() != ISD::OR)
1963 return nullptr;
1964
1965 unsigned Opc;
1966 unsigned LSB, MSB;
1967 SDValue Opd0, Opd1;
1968
1969 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
1970 return nullptr;
1971
1972 EVT VT = N->getValueType(0);
1973 SDValue Ops[] = { Opd0,
1974 Opd1,
1975 CurDAG->getTargetConstant(LSB, VT),
1976 CurDAG->getTargetConstant(MSB, VT) };
1977 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1978}
1979
1980SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
1981 EVT VT = N->getValueType(0);
1982 unsigned Variant;
1983 unsigned Opc;
1984 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
1985
1986 if (VT == MVT::f32) {
1987 Variant = 0;
1988 } else if (VT == MVT::f64) {
1989 Variant = 1;
1990 } else
1991 return nullptr; // Unrecognized argument type. Fall back on default codegen.
1992
1993 // Pick the FRINTX variant needed to set the flags.
1994 unsigned FRINTXOpc = FRINTXOpcs[Variant];
1995
1996 switch (N->getOpcode()) {
1997 default:
1998 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
1999 case ISD::FCEIL: {
2000 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2001 Opc = FRINTPOpcs[Variant];
2002 break;
2003 }
2004 case ISD::FFLOOR: {
2005 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2006 Opc = FRINTMOpcs[Variant];
2007 break;
2008 }
2009 case ISD::FTRUNC: {
2010 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2011 Opc = FRINTZOpcs[Variant];
2012 break;
2013 }
2014 case ISD::FROUND: {
2015 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2016 Opc = FRINTAOpcs[Variant];
2017 break;
2018 }
2019 }
2020
2021 SDLoc dl(N);
2022 SDValue In = N->getOperand(0);
2023 SmallVector<SDValue, 2> Ops;
2024 Ops.push_back(In);
2025
2026 if (!TM.Options.UnsafeFPMath) {
2027 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
2028 Ops.push_back(SDValue(FRINTX, 1));
2029 }
2030
2031 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2032}
2033
2034bool
2035AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2036 unsigned RegWidth) {
2037 APFloat FVal(0.0);
2038 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2039 FVal = CN->getValueAPF();
2040 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2041 // Some otherwise illegal constants are allowed in this case.
2042 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2043 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2044 return false;
2045
2046 ConstantPoolSDNode *CN =
2047 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2048 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2049 } else
2050 return false;
2051
2052 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2053 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2054 // x-register.
2055 //
2056 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2057 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2058 // integers.
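 // As a hypothetical example (illustrative only): a multiplier of 65536.0
 // is exactly 2^16, so FBits = 16 and the node selects a fixed-point
 // conversion with 16 fractional bits.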
2059 bool IsExact;
2060
2061 // fbits is between 1 and 64 in the worst-case, which means the fmul
2062 // could have 2^64 as an actual operand. Need 65 bits of precision.
2063 APSInt IntVal(65, true);
2064 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2065
2066 // N.b. isPowerOf2 also checks for > 0.
2067 if (!IsExact || !IntVal.isPowerOf2()) return false;
2068 unsigned FBits = IntVal.logBase2();
2069
2070 // Checks above should have guaranteed that we haven't lost information in
2071 // finding FBits, but it must still be in range.
2072 if (FBits == 0 || FBits > RegWidth) return false;
2073
2074 FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
2075 return true;
2076}
2077
2078SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2079 // Dump information about the Node being selected
2080 DEBUG(errs() << "Selecting: ");
2081 DEBUG(Node->dump(CurDAG));
2082 DEBUG(errs() << "\n");
2083
2084 // If we have a custom node, we already have selected!
2085 if (Node->isMachineOpcode()) {
2086 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2087 Node->setNodeId(-1);
2088 return nullptr;
2089 }
2090
2091 // A few custom selection cases.
2092 SDNode *ResNode = nullptr;
2093 EVT VT = Node->getValueType(0);
2094
2095 switch (Node->getOpcode()) {
2096 default:
2097 break;
2098
2099 case ISD::ADD:
2100 if (SDNode *I = SelectMLAV64LaneV128(Node))
2101 return I;
2102 break;
2103
2104 case ISD::LOAD: {
2105 // Try to select as an indexed load. Fall through to normal processing
2106 // if we can't.
2107 bool Done = false;
2108 SDNode *I = SelectIndexedLoad(Node, Done);
2109 if (Done)
2110 return I;
2111 break;
2112 }
2113
2114 case ISD::SRL:
2115 case ISD::AND:
2116 case ISD::SRA:
2117 if (SDNode *I = SelectBitfieldExtractOp(Node))
2118 return I;
2119 break;
2120
2121 case ISD::OR:
2122 if (SDNode *I = SelectBitfieldInsertOp(Node))
2123 return I;
2124 break;
2125
2126 case ISD::EXTRACT_VECTOR_ELT: {
2127 // Extracting lane zero is a special case where we can just use a plain
2128 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2129 // the rest of the compiler, especially the register allocator and copy
2130 // propagation, to reason about, so is preferred when it's possible to
2131 // use it.
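 // As a hypothetical example (illustrative only): extracting lane 0 of a
 // v2f64 as an f64 becomes an EXTRACT_SUBREG of the dsub subregister.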
2132 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2133 // Bail and use the default Select() for non-zero lanes.
2134 if (LaneNode->getZExtValue() != 0)
2135 break;
2136 // If the element type is not the same as the result type, likewise
2137 // bail and use the default Select(), as there's more to do than just
2138 // a cross-class COPY. This catches extracts of i8 and i16 elements
2139 // since they will need an explicit zext.
2140 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2141 break;
2142 unsigned SubReg;
2143 switch (Node->getOperand(0)
2144 .getValueType()
2145 .getVectorElementType()
2146 .getSizeInBits()) {
2147 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002148 llvm_unreachable("Unexpected vector element type!");
Tim Northover3b0846e2014-05-24 12:50:23 +00002149 case 64:
2150 SubReg = AArch64::dsub;
2151 break;
2152 case 32:
2153 SubReg = AArch64::ssub;
2154 break;
Oliver Stannard89d15422014-08-27 16:16:04 +00002155 case 16:
2156 SubReg = AArch64::hsub;
2157 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00002158 case 8:
2159 llvm_unreachable("unexpected zext-requiring extract element!");
2160 }
2161 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2162 Node->getOperand(0));
2163 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2164 DEBUG(Extract->dumpr(CurDAG));
2165 DEBUG(dbgs() << "\n");
2166 return Extract.getNode();
2167 }
2168 case ISD::Constant: {
2169 // Materialize zero constants as copies from WZR/XZR. This allows
2170 // the coalescer to propagate these into other instructions.
2171 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2172 if (ConstNode->isNullValue()) {
2173 if (VT == MVT::i32)
2174 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2175 AArch64::WZR, MVT::i32).getNode();
2176 else if (VT == MVT::i64)
2177 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2178 AArch64::XZR, MVT::i64).getNode();
2179 }
2180 break;
2181 }
2182
2183 case ISD::FrameIndex: {
2184 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2185 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2186 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2187 const TargetLowering *TLI = getTargetLowering();
2188 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2189 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2190 CurDAG->getTargetConstant(Shifter, MVT::i32) };
2191 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2192 }
2193 case ISD::INTRINSIC_W_CHAIN: {
2194 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2195 switch (IntNo) {
2196 default:
2197 break;
2198 case Intrinsic::aarch64_ldaxp:
2199 case Intrinsic::aarch64_ldxp: {
2200 unsigned Op =
2201 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2202 SDValue MemAddr = Node->getOperand(2);
2203 SDLoc DL(Node);
2204 SDValue Chain = Node->getOperand(0);
2205
2206 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2207 MVT::Other, MemAddr, Chain);
2208
2209 // Transfer memoperands.
2210 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2211 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2212 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2213 return Ld;
2214 }
2215 case Intrinsic::aarch64_stlxp:
2216 case Intrinsic::aarch64_stxp: {
2217 unsigned Op =
2218 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2219 SDLoc DL(Node);
2220 SDValue Chain = Node->getOperand(0);
2221 SDValue ValLo = Node->getOperand(2);
2222 SDValue ValHi = Node->getOperand(3);
2223 SDValue MemAddr = Node->getOperand(4);
2224
2225 // Place arguments in the right order.
Benjamin Kramerea68a942015-02-19 15:26:17 +00002226 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00002227
2228 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2229 // Transfer memoperands.
2230 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2231 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2232 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2233
2234 return St;
2235 }
2236 case Intrinsic::aarch64_neon_ld1x2:
2237 if (VT == MVT::v8i8)
2238 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2239 else if (VT == MVT::v16i8)
2240 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002241 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002242 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002243 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002244 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2245 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2246 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2247 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2248 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2249 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2250 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2251 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2252 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2253 break;
2254 case Intrinsic::aarch64_neon_ld1x3:
2255 if (VT == MVT::v8i8)
2256 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2257 else if (VT == MVT::v16i8)
2258 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002259 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002260 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002261 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002262 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2263 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2264 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2265 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2266 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2267 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2268 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2269 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2270 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2271 break;
2272 case Intrinsic::aarch64_neon_ld1x4:
2273 if (VT == MVT::v8i8)
2274 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2275 else if (VT == MVT::v16i8)
2276 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002277 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002278 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002279 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002280 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2281 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2282 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2283 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2284 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2285 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2286 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2287 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2288 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2289 break;
2290 case Intrinsic::aarch64_neon_ld2:
2291 if (VT == MVT::v8i8)
2292 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2293 else if (VT == MVT::v16i8)
2294 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002295 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002296 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002297 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002298 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2299 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2300 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2301 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2302 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2303 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2304 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2305 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2306 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2307 break;
2308 case Intrinsic::aarch64_neon_ld3:
2309 if (VT == MVT::v8i8)
2310 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2311 else if (VT == MVT::v16i8)
2312 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002313 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002314 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002315 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002316 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2317 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2318 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2319 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2320 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2321 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2322 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2323 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2324 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2325 break;
2326 case Intrinsic::aarch64_neon_ld4:
2327 if (VT == MVT::v8i8)
2328 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2329 else if (VT == MVT::v16i8)
2330 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002331 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002332 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002333 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002334 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2335 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2336 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2337 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2338 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2339 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2340 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2341 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2342 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2343 break;
2344 case Intrinsic::aarch64_neon_ld2r:
2345 if (VT == MVT::v8i8)
2346 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2347 else if (VT == MVT::v16i8)
2348 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002349 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002350 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002351 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002352 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2353 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2354 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2355 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2356 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2357 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2358 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2359 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2360 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2361 break;
2362 case Intrinsic::aarch64_neon_ld3r:
2363 if (VT == MVT::v8i8)
2364 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2365 else if (VT == MVT::v16i8)
2366 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002367 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002368 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002369 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002370 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2371 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2372 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2373 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2374 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2375 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2376 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2377 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2378 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2379 break;
2380 case Intrinsic::aarch64_neon_ld4r:
2381 if (VT == MVT::v8i8)
2382 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2383 else if (VT == MVT::v16i8)
2384 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002385 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002386 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002387 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002388 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2389 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2390 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2391 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2392 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2393 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2394 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2395 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2396 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2397 break;
2398 case Intrinsic::aarch64_neon_ld2lane:
2399 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2400 return SelectLoadLane(Node, 2, AArch64::LD2i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002401 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2402 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002403 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2404 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2405 VT == MVT::v2f32)
2406 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2407 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2408 VT == MVT::v1f64)
2409 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2410 break;
2411 case Intrinsic::aarch64_neon_ld3lane:
2412 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2413 return SelectLoadLane(Node, 3, AArch64::LD3i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002414 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2415 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002416 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2417 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2418 VT == MVT::v2f32)
2419 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2420 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2421 VT == MVT::v1f64)
2422 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2423 break;
2424 case Intrinsic::aarch64_neon_ld4lane:
2425 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2426 return SelectLoadLane(Node, 4, AArch64::LD4i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002427 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2428 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002429 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2430 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2431 VT == MVT::v2f32)
2432 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2433 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2434 VT == MVT::v1f64)
2435 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2436 break;
2437 }
2438 } break;
2439 case ISD::INTRINSIC_WO_CHAIN: {
2440 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2441 switch (IntNo) {
2442 default:
2443 break;
2444 case Intrinsic::aarch64_neon_tbl2:
2445 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2446 : AArch64::TBLv16i8Two,
2447 false);
2448 case Intrinsic::aarch64_neon_tbl3:
2449 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2450 : AArch64::TBLv16i8Three,
2451 false);
2452 case Intrinsic::aarch64_neon_tbl4:
2453 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2454 : AArch64::TBLv16i8Four,
2455 false);
2456 case Intrinsic::aarch64_neon_tbx2:
2457 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2458 : AArch64::TBXv16i8Two,
2459 true);
2460 case Intrinsic::aarch64_neon_tbx3:
2461 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2462 : AArch64::TBXv16i8Three,
2463 true);
2464 case Intrinsic::aarch64_neon_tbx4:
2465 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2466 : AArch64::TBXv16i8Four,
2467 true);
2468 case Intrinsic::aarch64_neon_smull:
2469 case Intrinsic::aarch64_neon_umull:
2470 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2471 return N;
2472 break;
2473 }
2474 break;
2475 }
2476 case ISD::INTRINSIC_VOID: {
2477 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2478 if (Node->getNumOperands() >= 3)
2479 VT = Node->getOperand(2)->getValueType(0);
2480 switch (IntNo) {
2481 default:
2482 break;
2483 case Intrinsic::aarch64_neon_st1x2: {
2484 if (VT == MVT::v8i8)
2485 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2486 else if (VT == MVT::v16i8)
2487 return SelectStore(Node, 2, AArch64::ST1Twov16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002488 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002489 return SelectStore(Node, 2, AArch64::ST1Twov4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002490 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002491 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2492 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2493 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2494 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2495 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2496 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2497 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2498 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2499 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2500 break;
2501 }
2502 case Intrinsic::aarch64_neon_st1x3: {
2503 if (VT == MVT::v8i8)
2504 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2505 else if (VT == MVT::v16i8)
2506 return SelectStore(Node, 3, AArch64::ST1Threev16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002507 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002508 return SelectStore(Node, 3, AArch64::ST1Threev4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002509 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002510 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2511 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2512 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2513 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2514 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2515 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2516 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2517 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2518 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2519 break;
2520 }
2521 case Intrinsic::aarch64_neon_st1x4: {
2522 if (VT == MVT::v8i8)
2523 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2524 else if (VT == MVT::v16i8)
2525 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002526 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002527 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002528 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002529 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2530 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2531 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2532 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2533 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2534 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2535 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2536 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2537 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2538 break;
2539 }
2540 case Intrinsic::aarch64_neon_st2: {
2541 if (VT == MVT::v8i8)
2542 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2543 else if (VT == MVT::v16i8)
2544 return SelectStore(Node, 2, AArch64::ST2Twov16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002545 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002546 return SelectStore(Node, 2, AArch64::ST2Twov4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002547 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002548 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2549 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2550 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2551 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2552 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2553 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2554 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2555 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2556 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2557 break;
2558 }
2559 case Intrinsic::aarch64_neon_st3: {
2560 if (VT == MVT::v8i8)
2561 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2562 else if (VT == MVT::v16i8)
2563 return SelectStore(Node, 3, AArch64::ST3Threev16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002564 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002565 return SelectStore(Node, 3, AArch64::ST3Threev4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002566 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002567 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2568 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2569 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2570 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2571 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2572 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2573 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2574 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2575 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2576 break;
2577 }
2578 case Intrinsic::aarch64_neon_st4: {
2579 if (VT == MVT::v8i8)
2580 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2581 else if (VT == MVT::v16i8)
2582 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002583 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002584 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002585 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002586 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2587 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2588 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2589 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2590 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2591 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2592 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2593 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2594 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2595 break;
2596 }
2597 case Intrinsic::aarch64_neon_st2lane: {
2598 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2599 return SelectStoreLane(Node, 2, AArch64::ST2i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002600 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2601 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002602 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2603 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2604 VT == MVT::v2f32)
2605 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2606 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2607 VT == MVT::v1f64)
2608 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2609 break;
2610 }
2611 case Intrinsic::aarch64_neon_st3lane: {
2612 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2613 return SelectStoreLane(Node, 3, AArch64::ST3i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002614 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2615 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002616 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2617 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2618 VT == MVT::v2f32)
2619 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2620 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2621 VT == MVT::v1f64)
2622 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2623 break;
2624 }
2625 case Intrinsic::aarch64_neon_st4lane: {
2626 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2627 return SelectStoreLane(Node, 4, AArch64::ST4i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002628 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2629 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002630 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2631 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2632 VT == MVT::v2f32)
2633 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2634 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2635 VT == MVT::v1f64)
2636 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2637 break;
2638 }
2639 }
2640 }
2641 case AArch64ISD::LD2post: {
2642 if (VT == MVT::v8i8)
2643 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2644 else if (VT == MVT::v16i8)
2645 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002646 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002647 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002648 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002649 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2650 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2651 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2652 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2653 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2654 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2655 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2656 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2657 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2658 break;
2659 }
2660 case AArch64ISD::LD3post: {
2661 if (VT == MVT::v8i8)
2662 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2663 else if (VT == MVT::v16i8)
2664 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002665 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002666 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002667 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002668 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2669 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2670 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2671 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2672 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2673 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2674 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2675 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2676 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2677 break;
2678 }
2679 case AArch64ISD::LD4post: {
2680 if (VT == MVT::v8i8)
2681 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2682 else if (VT == MVT::v16i8)
2683 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002684 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002685 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002686 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002687 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2688 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2689 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2690 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2691 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2692 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2693 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2694 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2695 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2696 break;
2697 }
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
    break;
  }
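  // Post-incremented load-and-replicate forms (LD1R..LD4R): each loaded
  // element is broadcast to every lane of its destination vector.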
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
    break;
  }
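  // Post-incremented lane loads: only a single lane is loaded, so the
  // instruction is chosen purely by element size, not by vector width.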
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
    break;
  }
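  // Post-incremented structured stores. The vector type is taken from
  // operand 1 (the first stored value) rather than from the node's result.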
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
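  // Post-incremented ST1 of two, three or four consecutive registers, the
  // store counterparts of the LD1x{2,3,4}post cases above.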
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
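  // Post-incremented lane stores: as with the lane loads, dispatch depends
  // only on the element size.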
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }

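  // Floating-point rounding nodes: SelectLIBM tries to select these directly
  // to a single rounding instruction; if it returns null, selection falls
  // through to the generated matcher below.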
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}