//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
        ForCodeSize(false) {}

  const char *getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize =
        MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
        MF.getFunction()->hasFnAttribute(Attribute::MinSize);
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }


  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the zero-extended value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  Val = CurDAG->getTargetConstant(Immed, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
}

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
}

SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               SDLoc(N), MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}


/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
  return isWorthFolding(N);
}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    const DataLayout *DL = TLI->getDataLayout();
    Type *Ty = GV->getType()->getElementType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL->getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i64);
  return true;
}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
      return true;
    }
  }
  return false;
}

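/// Widen - Promote a 32-bit value to 64 bits by inserting it into the low
/// sub_32 lane of an IMPLICIT_DEF 64-bit register.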
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
      0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  if (isWorthFolding(N))
    return true;

  return false;
}

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. Instead it will use the [BaseReg + 0] address mode and generate
  // instructions like:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // For such situations, using the [BaseReg, XReg] addressing mode can save
  // one ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode, and also immediates that can be encoded by a single ADD (SUB is
    // also checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDLoc DL(N.getNode());
    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0].getNode());

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

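/// SelectTable - Select a table-lookup style operation: gather the vector-list
/// operands into a Q-register tuple and emit the given opcode, forwarding the
/// extra initial operand when isExt is set.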
SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

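/// SelectIndexedLoad - Select a pre- or post-indexed scalar, FP or vector
/// load, replacing the original node's value, writeback and chain results.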
SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
                    CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));

  return nullptr;
}

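/// SelectLoad - Select a vector-list load: emit Opc producing an untyped
/// register tuple and replace each of N's NumVecs results with a subregister
/// extract starting at SubRegIdx.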
SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
                                        unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
        CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}

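/// SelectPostLoad - As SelectLoad, but for post-incremented loads; result 0 of
/// the new node is the updated base register.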
SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                            unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
          CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}

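/// SelectStore - Select a vector-list store: gather the NumVecs source vectors
/// into a D- or Q-register tuple and emit the given store opcode.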
SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}

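/// SelectPostStore - As SelectStore, but for post-incremented stores that also
/// produce the updated base register.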
SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}

/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

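/// SelectLoadLane - Select a load into one lane of NumVecs vectors, widening
/// 64-bit operands to 128 bits so the whole tuple lives in Q registers.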
SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                            unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                              AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return Ld;
}

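/// SelectPostLoadLane - As SelectLoadLane, but for the post-incremented form
/// that additionally yields the updated base register.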
SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                                unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, MVT::i64), // Lane Number
                   N->getOperand(NumVecs + 2),                  // Base register
                   N->getOperand(NumVecs + 3),                  // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                                AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return Ld;
}

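/// SelectStoreLane - Select a store of a single lane from each of NumVecs
/// vectors, transferring the memory operand onto the new node.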
SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}

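/// SelectPostStoreLane - As SelectStoreLane, but for the post-incremented form
/// that also produces the updated base register.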
SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                                 unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}

static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be a AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching a bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t And_imm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that.
  And_imm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (And_imm & (And_imm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t Srl_imm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   Srl_imm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
                                  : countTrailingOnes<uint64_t>(And_imm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}

David Xu052b9d92014-09-02 09:33:56 +00001430static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1431 SDValue &Opd0, unsigned &LSB,
1432 unsigned &MSB) {
1433 // We are looking for the following pattern which basically extracts several
1434 // continuous bits from the source value and places it from the LSB of the
1435 // destination value, all other bits of the destination value or set to zero:
Tim Northover3b0846e2014-05-24 12:50:23 +00001436 //
1437 // Value2 = AND Value, MaskImm
1438 // SRL Value2, ShiftImm
1439 //
David Xu052b9d92014-09-02 09:33:56 +00001440 // with MaskImm >> ShiftImm to search for the bit width.
Tim Northover3b0846e2014-05-24 12:50:23 +00001441 //
1442 // This gets selected into a single UBFM:
1443 //
David Xu052b9d92014-09-02 09:33:56 +00001444   //  UBFM Value, ShiftImm, BitWide + ShiftImm - 1
Tim Northover3b0846e2014-05-24 12:50:23 +00001445 //
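  // For example (a sketch): (srl (and x, 0xff0), 4) has MaskImm >> ShiftImm ==
  // 0xff, so BitWide = 8 and the node selects to UBFM x, 4, 11, i.e. bits
  // [11:4] of x.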
1446
1447 if (N->getOpcode() != ISD::SRL)
1448 return false;
1449
1450 uint64_t And_mask = 0;
1451 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1452 return false;
1453
1454 Opd0 = N->getOperand(0).getOperand(0);
1455
1456 uint64_t Srl_imm = 0;
1457 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1458 return false;
1459
David Xu052b9d92014-09-02 09:33:56 +00001460   // Check whether we really have a multi-bit extract here.
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001461 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
David Xu052b9d92014-09-02 09:33:56 +00001462 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001463 if (N->getValueType(0) == MVT::i32)
1464 Opc = AArch64::UBFMWri;
1465 else
1466 Opc = AArch64::UBFMXri;
1467
David Xu052b9d92014-09-02 09:33:56 +00001468 LSB = Srl_imm;
1469 MSB = BitWide + Srl_imm - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001470 return true;
1471 }
1472
1473 return false;
1474}
1475
1476static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1477 unsigned &LSB, unsigned &MSB,
1478 bool BiggerPattern) {
1479 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1480 "N must be a SHR/SRA operation to call this function");
1481
1482 EVT VT = N->getValueType(0);
1483
1484   // Here we could test the type of VT and return false when it does not
1485   // match, but since that check is already done prior to this call in the
1486   // current context, we turned it into an assert to avoid redundant code.
1487 assert((VT == MVT::i32 || VT == MVT::i64) &&
1488 "Type checking must have been done before calling this function");
1489
David Xu052b9d92014-09-02 09:33:56 +00001490   // Check for AND + SRL doing a multi-bit extract.
1491 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
Tim Northover3b0846e2014-05-24 12:50:23 +00001492 return true;
1493
1494   // We're looking for a shift of a shift.
1495 uint64_t Shl_imm = 0;
1496 uint64_t Trunc_bits = 0;
1497 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1498 Opd0 = N->getOperand(0).getOperand(0);
1499 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1500 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1501     // We are looking for a shift of a truncate. A truncate from i64 to i32
1502     // can be considered as setting the high 32 bits to zero. Our strategy here
1503     // is to always generate a 64-bit UBFM. This consistency will help the CSE
1504     // pass later find more redundancy.
1505 Opd0 = N->getOperand(0).getOperand(0);
1506 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1507 VT = Opd0->getValueType(0);
1508 assert(VT == MVT::i64 && "the promoted type should be i64");
1509 } else if (BiggerPattern) {
1510 // Let's pretend a 0 shift left has been performed.
1511 // FIXME: Currently we limit this to the bigger pattern case,
1512 // because some optimizations expect AND and not UBFM
1513 Opd0 = N->getOperand(0);
1514 } else
1515 return false;
1516
Matthias Braun75260352015-02-24 18:52:04 +00001517 // Missing combines/constant folding may have left us with strange
1518 // constants.
Matthias Braun02892ec2015-02-25 18:03:50 +00001519 if (Shl_imm >= VT.getSizeInBits()) {
1520 DEBUG((dbgs() << N
1521 << ": Found large shift immediate, this should not happen\n"));
Matthias Braun75260352015-02-24 18:52:04 +00001522 return false;
Matthias Braun02892ec2015-02-25 18:03:50 +00001523 }
Matthias Braun75260352015-02-24 18:52:04 +00001524
Tim Northover3b0846e2014-05-24 12:50:23 +00001525 uint64_t Srl_imm = 0;
1526 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1527 return false;
1528
1529 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1530 "bad amount in shift node!");
1531 // Note: The width operand is encoded as width-1.
1532 unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
1533 int sLSB = Srl_imm - Shl_imm;
1534 if (sLSB < 0)
1535 return false;
1536 LSB = sLSB;
1537 MSB = LSB + Width;
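  // A worked sketch: for i32 (sra (shl x, 24), 28), Shl_imm = 24 and
  // Srl_imm = 28, giving Width = 3, LSB = 4 and MSB = 7; the node selects to
  // SBFMWri x, 4, 7, which extracts bits [7:4] of x and sign-extends them.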
1538 // SRA requires a signed extraction
1539 if (VT == MVT::i32)
1540 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1541 else
1542 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1543 return true;
1544}
1545
1546static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1547 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1548 unsigned NumberOfIgnoredLowBits = 0,
1549 bool BiggerPattern = false) {
1550 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1551 return false;
1552
1553 switch (N->getOpcode()) {
1554 default:
1555 if (!N->isMachineOpcode())
1556 return false;
1557 break;
1558 case ISD::AND:
1559 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1560 NumberOfIgnoredLowBits, BiggerPattern);
1561 case ISD::SRL:
1562 case ISD::SRA:
1563 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1564 }
1565
1566 unsigned NOpc = N->getMachineOpcode();
1567 switch (NOpc) {
1568 default:
1569 return false;
1570 case AArch64::SBFMWri:
1571 case AArch64::UBFMWri:
1572 case AArch64::SBFMXri:
1573 case AArch64::UBFMXri:
1574 Opc = NOpc;
1575 Opd0 = N->getOperand(0);
1576 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1577 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1578 return true;
1579 }
1580 // Unreachable
1581 return false;
1582}
1583
1584SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1585 unsigned Opc, LSB, MSB;
1586 SDValue Opd0;
1587 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1588 return nullptr;
1589
1590 EVT VT = N->getValueType(0);
1591
1592   // If the bit extract operation is 64-bit but the original type is 32-bit,
1593   // we need to add one EXTRACT_SUBREG.
1594 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1595 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
1596 CurDAG->getTargetConstant(MSB, MVT::i64)};
1597
1598 SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
1599 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
1600 MachineSDNode *Node =
1601 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
1602 SDValue(BFM, 0), SubReg);
1603 return Node;
1604 }
1605
1606 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
1607 CurDAG->getTargetConstant(MSB, VT)};
1608 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1609}
1610
1611/// Does DstMask form a complementary pair with the mask provided by
1612/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
1613/// this asks whether DstMask zeroes precisely those bits that will be set by
1614/// the other half.
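/// For example, on i32 with no ignored high bits, DstMask == 0xffff0000 is
/// complementary to BitsToBeInserted == 0x0000ffff.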
1615static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1616 unsigned NumberOfIgnoredHighBits, EVT VT) {
1617 assert((VT == MVT::i32 || VT == MVT::i64) &&
1618 "i32 or i64 mask type expected!");
1619 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1620
1621 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1622 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1623
1624 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1625 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1626}
1627
1628// Look for bits that will be useful for later uses.
1629 // A bit is considered useless as soon as it is dropped and is never used
1630 // before it has been dropped.
1631 // E.g., looking for the useful bits of x:
1632 // 1. y = x & 0x7
1633 // 2. z = y >> 2
1634 // After #1, the useful bits of x are 0x7; these useful bits then live through
1635 // y.
1636 // After #2, the useful bits of x are 0x4.
1637 // However, if x is used by an unpredictable instruction, then all its bits
1638 // are useful.
1639// E.g.
1640// 1. y = x & 0x7
1641// 2. z = y >> 2
1642// 3. str x, [@x]
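// Here, the store at #3 uses x directly, so despite #1 and #2 every bit of x
// remains useful.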
1643static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1644
1645static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1646 unsigned Depth) {
1647 uint64_t Imm =
1648 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1649 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1650 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1651 getUsefulBits(Op, UsefulBits, Depth + 1);
1652}
1653
1654static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1655 uint64_t Imm, uint64_t MSB,
1656 unsigned Depth) {
1657 // inherit the bitwidth value
1658 APInt OpUsefulBits(UsefulBits);
1659 OpUsefulBits = 1;
1660
1661 if (MSB >= Imm) {
1662 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1663 --OpUsefulBits;
1664 // The interesting part will be in the lower part of the result
1665 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1666 // The interesting part was starting at Imm in the argument
1667 OpUsefulBits = OpUsefulBits.shl(Imm);
1668 } else {
1669 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1670 --OpUsefulBits;
1671 // The interesting part will be shifted in the result
1672 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1673 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1674 // The interesting part was at zero in the argument
1675 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1676 }
1677
1678 UsefulBits &= OpUsefulBits;
1679}
1680
1681static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1682 unsigned Depth) {
1683 uint64_t Imm =
1684 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1685 uint64_t MSB =
1686 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1687
1688 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1689}
1690
1691static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1692 unsigned Depth) {
1693 uint64_t ShiftTypeAndValue =
1694 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1695 APInt Mask(UsefulBits);
1696 Mask.clearAllBits();
1697 Mask.flipAllBits();
1698
1699 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1700 // Shift Left
1701 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1702 Mask = Mask.shl(ShiftAmt);
1703 getUsefulBits(Op, Mask, Depth + 1);
1704 Mask = Mask.lshr(ShiftAmt);
1705 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1706 // Shift Right
1707 // We do not handle AArch64_AM::ASR, because the sign will change the
1708 // number of useful bits
1709 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1710 Mask = Mask.lshr(ShiftAmt);
1711 getUsefulBits(Op, Mask, Depth + 1);
1712 Mask = Mask.shl(ShiftAmt);
1713 } else
1714 return;
1715
1716 UsefulBits &= Mask;
1717}
1718
1719static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1720 unsigned Depth) {
1721 uint64_t Imm =
1722 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1723 uint64_t MSB =
1724 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1725
1726 if (Op.getOperand(1) == Orig)
1727 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1728
1729 APInt OpUsefulBits(UsefulBits);
1730 OpUsefulBits = 1;
1731
1732 if (MSB >= Imm) {
1733 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1734 --OpUsefulBits;
1735 UsefulBits &= ~OpUsefulBits;
1736 getUsefulBits(Op, UsefulBits, Depth + 1);
1737 } else {
1738 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1739 --OpUsefulBits;
1740 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1741 getUsefulBits(Op, UsefulBits, Depth + 1);
1742 }
1743}
1744
1745static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1746 SDValue Orig, unsigned Depth) {
1747
1748 // Users of this node should have already been instruction selected
1749 // FIXME: Can we turn that into an assert?
1750 if (!UserNode->isMachineOpcode())
1751 return;
1752
1753 switch (UserNode->getMachineOpcode()) {
1754 default:
1755 return;
1756 case AArch64::ANDSWri:
1757 case AArch64::ANDSXri:
1758 case AArch64::ANDWri:
1759 case AArch64::ANDXri:
1760     // We increment Depth only when we call getUsefulBits.
1761 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1762 Depth);
1763 case AArch64::UBFMWri:
1764 case AArch64::UBFMXri:
1765 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1766
1767 case AArch64::ORRWrs:
1768 case AArch64::ORRXrs:
1769 if (UserNode->getOperand(1) != Orig)
1770 return;
1771 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1772 Depth);
1773 case AArch64::BFMWri:
1774 case AArch64::BFMXri:
1775 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1776 }
1777}
1778
1779static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1780 if (Depth >= 6)
1781 return;
1782 // Initialize UsefulBits
1783 if (!Depth) {
1784 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1785     // At the beginning, assume every produced bit is useful
1786 UsefulBits = APInt(Bitwidth, 0);
1787 UsefulBits.flipAllBits();
1788 }
1789 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1790
1791 for (SDNode *Node : Op.getNode()->uses()) {
1792 // A use cannot produce useful bits
1793 APInt UsefulBitsForUse = APInt(UsefulBits);
1794 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1795 UsersUsefulBits |= UsefulBitsForUse;
1796 }
1797 // UsefulBits contains the produced bits that are meaningful for the
1798 // current definition, thus a user cannot make a bit meaningful at
1799 // this point
1800 UsefulBits &= UsersUsefulBits;
1801}
1802
1803/// Create a machine node performing a notional SHL of Op by ShlAmount. If
1804/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1805/// 0, return Op unchanged.
1806static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1807 if (ShlAmount == 0)
1808 return Op;
1809
1810 EVT VT = Op.getValueType();
1811 unsigned BitWidth = VT.getSizeInBits();
1812 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1813
1814 SDNode *ShiftNode;
1815 if (ShlAmount > 0) {
1816 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
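    // (e.g. Amt == 5 gives UBFM wD, wN, #27, #26)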
1817 ShiftNode = CurDAG->getMachineNode(
1818 UBFMOpc, SDLoc(Op), VT, Op,
1819 CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
1820 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
1821 } else {
1822 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
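    // (e.g. Amt == 5 gives UBFM wD, wN, #5, #31)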
1823 assert(ShlAmount < 0 && "expected right shift");
1824 int ShrAmount = -ShlAmount;
1825 ShiftNode = CurDAG->getMachineNode(
1826 UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
1827 CurDAG->getTargetConstant(BitWidth - 1, VT));
1828 }
1829
1830 return SDValue(ShiftNode, 0);
1831}
1832
1833/// Does this tree qualify as an attempt to move a bitfield into position,
1834/// essentially "(and (shl VAL, N), Mask)".
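/// For example (assuming nothing more is known about VAL), (and (shl VAL, 3),
/// 0xf8) positions the low 5 bits of VAL at bits [7:3], giving
/// ShiftAmount == 3 and MaskWidth == 5.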
1835static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1836 SDValue &Src, int &ShiftAmount,
1837 int &MaskWidth) {
1838 EVT VT = Op.getValueType();
1839 unsigned BitWidth = VT.getSizeInBits();
1840 (void)BitWidth;
1841 assert(BitWidth == 32 || BitWidth == 64);
1842
1843 APInt KnownZero, KnownOne;
1844 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1845
1846 // Non-zero in the sense that they're not provably zero, which is the key
1847 // point if we want to use this value
1848 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1849
1850 // Discard a constant AND mask if present. It's safe because the node will
1851 // already have been factored into the computeKnownBits calculation above.
1852 uint64_t AndImm;
1853 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1854 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1855 Op = Op.getOperand(0);
1856 }
1857
1858 uint64_t ShlImm;
1859 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1860 return false;
1861 Op = Op.getOperand(0);
1862
1863 if (!isShiftedMask_64(NonZeroBits))
1864 return false;
1865
1866 ShiftAmount = countTrailingZeros(NonZeroBits);
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001867 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
Tim Northover3b0846e2014-05-24 12:50:23 +00001868
1869 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1870 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1871 // amount.
1872 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1873
1874 return true;
1875}
1876
1877 // Given an OR operation, check if we have the following pattern
1878 // ubfm c, b, imm, imm2 (or something that does the same job, see
1879 // isBitfieldExtractOp)
1880 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1881 // countTrailingZeros(mask2) == imm2 - imm + 1
1882 // f = d | c
1883 // If yes, the given reference arguments will be updated so that one can replace
1884// the OR instruction with:
1885// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
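// A worked sketch on i32: with imm = 4 and imm2 = 11, c extracts bits [11:4]
// of b, mask2 == 0xffffff00 (countTrailingZeros == 8 == imm2 - imm + 1), and
// the OR becomes BFMWri e, b, 4, 11 (a BFXIL), copying bits [11:4] of b into
// bits [7:0] of e.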
1886static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1887 SDValue &Src, unsigned &ImmR,
1888 unsigned &ImmS, SelectionDAG *CurDAG) {
1889 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
1890
1891 // Set Opc
1892 EVT VT = N->getValueType(0);
1893 if (VT == MVT::i32)
1894 Opc = AArch64::BFMWri;
1895 else if (VT == MVT::i64)
1896 Opc = AArch64::BFMXri;
1897 else
1898 return false;
1899
1900 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1901 // have the expected shape. Try to undo that.
1902 APInt UsefulBits;
1903 getUsefulBits(SDValue(N, 0), UsefulBits);
1904
1905 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1906 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1907
1908   // OR is commutative, check both possibilities (does LLVM provide a
1909   // way to do that directly, e.g., via a code matcher?)
1910 SDValue OrOpd1Val = N->getOperand(1);
1911 SDNode *OrOpd0 = N->getOperand(0).getNode();
1912 SDNode *OrOpd1 = N->getOperand(1).getNode();
1913 for (int i = 0; i < 2;
1914 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1915 unsigned BFXOpc;
1916 int DstLSB, Width;
1917 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1918 NumberOfIgnoredLowBits, true)) {
1919 // Check that the returned opcode is compatible with the pattern,
1920 // i.e., same type and zero extended (U and not S)
1921 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
1922 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
1923 continue;
1924
1925 // Compute the width of the bitfield insertion
1926 DstLSB = 0;
1927 Width = ImmS - ImmR + 1;
1928       // FIXME: This constraint is to catch bitfield insertion only; we may
1929       // want to widen the pattern if we want to grab the general bitfield
1930       // move case.
1931 if (Width <= 0)
1932 continue;
1933
1934 // If the mask on the insertee is correct, we have a BFXIL operation. We
1935 // can share the ImmR and ImmS values from the already-computed UBFM.
1936 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1937 DstLSB, Width)) {
1938 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
1939 ImmS = Width - 1;
1940 } else
1941 continue;
1942
1943 // Check the second part of the pattern
1944 EVT VT = OrOpd1->getValueType(0);
1945 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1946
1947 // Compute the Known Zero for the candidate of the first operand.
1948     // This allows us to catch more general cases than just looking for an
1949     // AND with imm. Indeed, simplify-demanded-bits may have removed
1950     // the AND instruction because it proved it was useless.
1951 APInt KnownZero, KnownOne;
1952 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
1953
1954 // Check if there is enough room for the second operand to appear
1955 // in the first one
1956 APInt BitsToBeInserted =
1957 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1958
1959 if ((BitsToBeInserted & ~KnownZero) != 0)
1960 continue;
1961
1962 // Set the first operand
1963 uint64_t Imm;
1964 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1965 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1966 // In that case, we can eliminate the AND
1967 Dst = OrOpd1->getOperand(0);
1968 else
1969 // Maybe the AND has been removed by simplify-demanded-bits
1970 // or is useful because it discards more bits
1971 Dst = OrOpd1Val;
1972
1973 // both parts match
1974 return true;
1975 }
1976
1977 return false;
1978}
1979
1980SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
1981 if (N->getOpcode() != ISD::OR)
1982 return nullptr;
1983
1984 unsigned Opc;
1985 unsigned LSB, MSB;
1986 SDValue Opd0, Opd1;
1987
1988 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
1989 return nullptr;
1990
1991 EVT VT = N->getValueType(0);
1992 SDValue Ops[] = { Opd0,
1993 Opd1,
1994 CurDAG->getTargetConstant(LSB, VT),
1995 CurDAG->getTargetConstant(MSB, VT) };
1996 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1997}
1998
1999SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
2000 EVT VT = N->getValueType(0);
2001 unsigned Variant;
2002 unsigned Opc;
2003 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2004
2005 if (VT == MVT::f32) {
2006 Variant = 0;
2007 } else if (VT == MVT::f64) {
2008 Variant = 1;
2009 } else
2010 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2011
2012 // Pick the FRINTX variant needed to set the flags.
2013 unsigned FRINTXOpc = FRINTXOpcs[Variant];
2014
2015 switch (N->getOpcode()) {
2016 default:
2017 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2018 case ISD::FCEIL: {
2019 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2020 Opc = FRINTPOpcs[Variant];
2021 break;
2022 }
2023 case ISD::FFLOOR: {
2024 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2025 Opc = FRINTMOpcs[Variant];
2026 break;
2027 }
2028 case ISD::FTRUNC: {
2029 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2030 Opc = FRINTZOpcs[Variant];
2031 break;
2032 }
2033 case ISD::FROUND: {
2034 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2035 Opc = FRINTAOpcs[Variant];
2036 break;
2037 }
2038 }
2039
2040 SDLoc dl(N);
2041 SDValue In = N->getOperand(0);
2042 SmallVector<SDValue, 2> Ops;
2043 Ops.push_back(In);
2044
2045 if (!TM.Options.UnsafeFPMath) {
2046 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
2047 Ops.push_back(SDValue(FRINTX, 1));
2048 }
2049
2050 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2051}
2052
2053bool
2054AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2055 unsigned RegWidth) {
2056 APFloat FVal(0.0);
2057 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2058 FVal = CN->getValueAPF();
2059 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2060 // Some otherwise illegal constants are allowed in this case.
2061 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2062 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2063 return false;
2064
2065 ConstantPoolSDNode *CN =
2066 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2067 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2068 } else
2069 return false;
2070
2071 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2072 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2073 // x-register.
2074 //
2075 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2076 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2077 // integers.
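  // For example, if THIS_NODE were the constant 65536.0 (i.e. 2^16), FBits
  // would come out as 16, i.e. a fixed-point conversion with 16 fractional
  // bits.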
2078 bool IsExact;
2079
2080 // fbits is between 1 and 64 in the worst-case, which means the fmul
2081 // could have 2^64 as an actual operand. Need 65 bits of precision.
2082 APSInt IntVal(65, true);
2083 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2084
2085 // N.b. isPowerOf2 also checks for > 0.
2086 if (!IsExact || !IntVal.isPowerOf2()) return false;
2087 unsigned FBits = IntVal.logBase2();
2088
2089 // Checks above should have guaranteed that we haven't lost information in
2090 // finding FBits, but it must still be in range.
2091 if (FBits == 0 || FBits > RegWidth) return false;
2092
2093 FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
2094 return true;
2095}
2096
2097SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2098 // Dump information about the Node being selected
2099 DEBUG(errs() << "Selecting: ");
2100 DEBUG(Node->dump(CurDAG));
2101 DEBUG(errs() << "\n");
2102
2103 // If we have a custom node, we already have selected!
2104 if (Node->isMachineOpcode()) {
2105 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2106 Node->setNodeId(-1);
2107 return nullptr;
2108 }
2109
2110   // A few custom selection cases.
2111 SDNode *ResNode = nullptr;
2112 EVT VT = Node->getValueType(0);
2113
2114 switch (Node->getOpcode()) {
2115 default:
2116 break;
2117
2118 case ISD::ADD:
2119 if (SDNode *I = SelectMLAV64LaneV128(Node))
2120 return I;
2121 break;
2122
2123 case ISD::LOAD: {
2124 // Try to select as an indexed load. Fall through to normal processing
2125 // if we can't.
2126 bool Done = false;
2127 SDNode *I = SelectIndexedLoad(Node, Done);
2128 if (Done)
2129 return I;
2130 break;
2131 }
2132
2133 case ISD::SRL:
2134 case ISD::AND:
2135 case ISD::SRA:
2136 if (SDNode *I = SelectBitfieldExtractOp(Node))
2137 return I;
2138 break;
2139
2140 case ISD::OR:
2141 if (SDNode *I = SelectBitfieldInsertOp(Node))
2142 return I;
2143 break;
2144
2145 case ISD::EXTRACT_VECTOR_ELT: {
2146 // Extracting lane zero is a special case where we can just use a plain
2147 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2148     // the rest of the compiler, especially the register allocator and copy
2149 // propagation, to reason about, so is preferred when it's possible to
2150 // use it.
2151 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2152 // Bail and use the default Select() for non-zero lanes.
2153 if (LaneNode->getZExtValue() != 0)
2154 break;
2155 // If the element type is not the same as the result type, likewise
2156 // bail and use the default Select(), as there's more to do than just
2157 // a cross-class COPY. This catches extracts of i8 and i16 elements
2158 // since they will need an explicit zext.
2159 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2160 break;
2161 unsigned SubReg;
2162 switch (Node->getOperand(0)
2163 .getValueType()
2164 .getVectorElementType()
2165 .getSizeInBits()) {
2166 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002167 llvm_unreachable("Unexpected vector element type!");
Tim Northover3b0846e2014-05-24 12:50:23 +00002168 case 64:
2169 SubReg = AArch64::dsub;
2170 break;
2171 case 32:
2172 SubReg = AArch64::ssub;
2173 break;
Oliver Stannard89d15422014-08-27 16:16:04 +00002174 case 16:
2175 SubReg = AArch64::hsub;
2176 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00002177 case 8:
2178 llvm_unreachable("unexpected zext-requiring extract element!");
2179 }
2180 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2181 Node->getOperand(0));
2182 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2183 DEBUG(Extract->dumpr(CurDAG));
2184 DEBUG(dbgs() << "\n");
2185 return Extract.getNode();
2186 }
2187 case ISD::Constant: {
2188 // Materialize zero constants as copies from WZR/XZR. This allows
2189 // the coalescer to propagate these into other instructions.
2190 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2191 if (ConstNode->isNullValue()) {
2192 if (VT == MVT::i32)
2193 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2194 AArch64::WZR, MVT::i32).getNode();
2195 else if (VT == MVT::i64)
2196 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2197 AArch64::XZR, MVT::i64).getNode();
2198 }
2199 break;
2200 }
2201
2202 case ISD::FrameIndex: {
2203 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2204 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2205 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2206 const TargetLowering *TLI = getTargetLowering();
2207 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2208 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2209 CurDAG->getTargetConstant(Shifter, MVT::i32) };
2210 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2211 }
2212 case ISD::INTRINSIC_W_CHAIN: {
2213 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2214 switch (IntNo) {
2215 default:
2216 break;
2217 case Intrinsic::aarch64_ldaxp:
2218 case Intrinsic::aarch64_ldxp: {
2219 unsigned Op =
2220 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2221 SDValue MemAddr = Node->getOperand(2);
2222 SDLoc DL(Node);
2223 SDValue Chain = Node->getOperand(0);
2224
2225 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2226 MVT::Other, MemAddr, Chain);
2227
2228 // Transfer memoperands.
2229 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2230 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2231 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2232 return Ld;
2233 }
2234 case Intrinsic::aarch64_stlxp:
2235 case Intrinsic::aarch64_stxp: {
2236 unsigned Op =
2237 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2238 SDLoc DL(Node);
2239 SDValue Chain = Node->getOperand(0);
2240 SDValue ValLo = Node->getOperand(2);
2241 SDValue ValHi = Node->getOperand(3);
2242 SDValue MemAddr = Node->getOperand(4);
2243
2244 // Place arguments in the right order.
Benjamin Kramerea68a942015-02-19 15:26:17 +00002245 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00002246
2247 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2248 // Transfer memoperands.
2249 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2250 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2251 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2252
2253 return St;
2254 }
2255 case Intrinsic::aarch64_neon_ld1x2:
2256 if (VT == MVT::v8i8)
2257 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2258 else if (VT == MVT::v16i8)
2259 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002260 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002261 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002262 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002263 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2264 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2265 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2266 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2267 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2268 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2269 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2270 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2271 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2272 break;
2273 case Intrinsic::aarch64_neon_ld1x3:
2274 if (VT == MVT::v8i8)
2275 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2276 else if (VT == MVT::v16i8)
2277 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002278 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002279 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002280 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002281 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2282 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2283 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2284 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2285 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2286 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2287 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2288 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2289 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2290 break;
2291 case Intrinsic::aarch64_neon_ld1x4:
2292 if (VT == MVT::v8i8)
2293 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2294 else if (VT == MVT::v16i8)
2295 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002296 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002297 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002298 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002299 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2300 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2301 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2302 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2303 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2304 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2305 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2306 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2307 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2308 break;
2309 case Intrinsic::aarch64_neon_ld2:
2310 if (VT == MVT::v8i8)
2311 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2312 else if (VT == MVT::v16i8)
2313 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002314 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002315 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002316 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002317 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2318 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2319 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2320 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2321 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2322 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2323 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2324 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2325 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2326 break;
2327 case Intrinsic::aarch64_neon_ld3:
2328 if (VT == MVT::v8i8)
2329 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2330 else if (VT == MVT::v16i8)
2331 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002332 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002333 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002334 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002335 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2336 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2337 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2338 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2339 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2340 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2341 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2342 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2343 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2344 break;
2345 case Intrinsic::aarch64_neon_ld4:
2346 if (VT == MVT::v8i8)
2347 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2348 else if (VT == MVT::v16i8)
2349 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002350 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002351 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002352 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002353 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2354 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2355 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2356 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2357 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2358 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2359 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2360 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2361 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2362 break;
2363 case Intrinsic::aarch64_neon_ld2r:
2364 if (VT == MVT::v8i8)
2365 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2366 else if (VT == MVT::v16i8)
2367 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002368 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002369 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002370 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002371 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2372 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2373 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2374 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2375 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2376 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2377 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2378 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2379 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2380 break;
2381 case Intrinsic::aarch64_neon_ld3r:
2382 if (VT == MVT::v8i8)
2383 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2384 else if (VT == MVT::v16i8)
2385 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002386 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002387 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002388 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002389 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2390 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2391 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2392 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2393 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2394 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2395 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2396 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2397 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2398 break;
2399 case Intrinsic::aarch64_neon_ld4r:
2400 if (VT == MVT::v8i8)
2401 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2402 else if (VT == MVT::v16i8)
2403 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002404 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002405 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002406 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002407 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2408 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2409 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2410 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2411 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2412 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2413 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2414 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2415 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2416 break;
2417 case Intrinsic::aarch64_neon_ld2lane:
2418 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2419 return SelectLoadLane(Node, 2, AArch64::LD2i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002420 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2421 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002422 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2423 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2424 VT == MVT::v2f32)
2425 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2426 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2427 VT == MVT::v1f64)
2428 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2429 break;
2430 case Intrinsic::aarch64_neon_ld3lane:
2431 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2432 return SelectLoadLane(Node, 3, AArch64::LD3i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002433 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2434 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002435 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2436 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2437 VT == MVT::v2f32)
2438 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2439 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2440 VT == MVT::v1f64)
2441 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2442 break;
2443 case Intrinsic::aarch64_neon_ld4lane:
2444 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2445 return SelectLoadLane(Node, 4, AArch64::LD4i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002446 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2447 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002448 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2449 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2450 VT == MVT::v2f32)
2451 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2452 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2453 VT == MVT::v1f64)
2454 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2455 break;
2456 }
2457 } break;
2458 case ISD::INTRINSIC_WO_CHAIN: {
2459 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2460 switch (IntNo) {
2461 default:
2462 break;
2463 case Intrinsic::aarch64_neon_tbl2:
2464 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2465 : AArch64::TBLv16i8Two,
2466 false);
2467 case Intrinsic::aarch64_neon_tbl3:
2468 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2469 : AArch64::TBLv16i8Three,
2470 false);
2471 case Intrinsic::aarch64_neon_tbl4:
2472 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2473 : AArch64::TBLv16i8Four,
2474 false);
2475 case Intrinsic::aarch64_neon_tbx2:
2476 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2477 : AArch64::TBXv16i8Two,
2478 true);
2479 case Intrinsic::aarch64_neon_tbx3:
2480 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2481 : AArch64::TBXv16i8Three,
2482 true);
2483 case Intrinsic::aarch64_neon_tbx4:
2484 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2485 : AArch64::TBXv16i8Four,
2486 true);
2487 case Intrinsic::aarch64_neon_smull:
2488 case Intrinsic::aarch64_neon_umull:
2489 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2490 return N;
2491 break;
2492 }
2493 break;
2494 }
2495 case ISD::INTRINSIC_VOID: {
2496 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2497 if (Node->getNumOperands() >= 3)
2498 VT = Node->getOperand(2)->getValueType(0);
2499 switch (IntNo) {
2500 default:
2501 break;
2502 case Intrinsic::aarch64_neon_st1x2: {
2503 if (VT == MVT::v8i8)
2504 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2505 else if (VT == MVT::v16i8)
2506 return SelectStore(Node, 2, AArch64::ST1Twov16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002507 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002508 return SelectStore(Node, 2, AArch64::ST1Twov4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002509 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002510 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2511 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2512 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2513 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2514 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2515 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2516 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2517 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2518 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2519 break;
2520 }
2521 case Intrinsic::aarch64_neon_st1x3: {
2522 if (VT == MVT::v8i8)
2523 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2524 else if (VT == MVT::v16i8)
2525 return SelectStore(Node, 3, AArch64::ST1Threev16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002526 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002527 return SelectStore(Node, 3, AArch64::ST1Threev4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002528 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002529 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2530 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2531 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2532 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2533 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2534 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2535 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2536 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2537 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2538 break;
2539 }
2540 case Intrinsic::aarch64_neon_st1x4: {
2541 if (VT == MVT::v8i8)
2542 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2543 else if (VT == MVT::v16i8)
2544 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002545 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002546 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002547 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002548 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2549 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2550 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2551 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2552 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2553 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2554 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2555 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2556 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2557 break;
2558 }
2559 case Intrinsic::aarch64_neon_st2: {
2560 if (VT == MVT::v8i8)
2561 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2562 else if (VT == MVT::v16i8)
2563 return SelectStore(Node, 2, AArch64::ST2Twov16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002564 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002565 return SelectStore(Node, 2, AArch64::ST2Twov4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002566 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002567 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2568 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2569 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2570 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2571 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2572 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2573 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2574 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2575 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2576 break;
2577 }
2578 case Intrinsic::aarch64_neon_st3: {
2579 if (VT == MVT::v8i8)
2580 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2581 else if (VT == MVT::v16i8)
2582 return SelectStore(Node, 3, AArch64::ST3Threev16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002583 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002584 return SelectStore(Node, 3, AArch64::ST3Threev4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002585 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002586 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2587 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2588 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2589 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2590 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2591 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2592 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2593 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2594 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2595 break;
2596 }
2597 case Intrinsic::aarch64_neon_st4: {
2598 if (VT == MVT::v8i8)
2599 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2600 else if (VT == MVT::v16i8)
2601 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
Oliver Stannard89d15422014-08-27 16:16:04 +00002602 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002603 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
Oliver Stannard89d15422014-08-27 16:16:04 +00002604 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002605 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2606 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2607 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2608 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2609 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2610 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2611 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2612 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2613 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2614 break;
2615 }
2616 case Intrinsic::aarch64_neon_st2lane: {
2617 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2618 return SelectStoreLane(Node, 2, AArch64::ST2i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002619 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2620 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002621 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2622 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2623 VT == MVT::v2f32)
2624 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2625 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2626 VT == MVT::v1f64)
2627 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2628 break;
2629 }
2630 case Intrinsic::aarch64_neon_st3lane: {
2631 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2632 return SelectStoreLane(Node, 3, AArch64::ST3i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002633 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2634 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002635 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2636 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2637 VT == MVT::v2f32)
2638 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2639 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2640 VT == MVT::v1f64)
2641 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2642 break;
2643 }
2644 case Intrinsic::aarch64_neon_st4lane: {
2645 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2646 return SelectStoreLane(Node, 4, AArch64::ST4i8);
Oliver Stannard89d15422014-08-27 16:16:04 +00002647 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2648 VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002649 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2650 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2651 VT == MVT::v2f32)
2652 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2653 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2654 VT == MVT::v1f64)
2655 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2656 break;
2657 }
2658 }
2659 }
2660 case AArch64ISD::LD2post: {
2661 if (VT == MVT::v8i8)
2662 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
    break;
  }
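  // The LD1x{2,3,4}post nodes are post-incrementing LD1 loads of two, three or
  // four consecutive registers with no de-interleaving. As above, the element
  // type picks the instruction, and the sub-register index (dsub0 vs. qsub0)
  // says whether the result tuple is built from 64-bit D or 128-bit Q regs.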
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
    break;
  }
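  // The DUP variants select the replicating LDnR instructions: each structure
  // element is loaded once and broadcast to every lane of its destination
  // register, with post-increment writeback of the base register.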
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
    break;
  }
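  // Lane-wise post-increment loads insert into a single lane, so only the
  // element size matters: integer and FP vectors of the same element width
  // share a case, and SelectPostLoadLane takes care of the lane operand.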
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
    break;
  }
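  // Post-incrementing structure stores. A store node produces no vector
  // result, so the value type that drives instruction choice is taken from
  // operand 1, the first vector being stored.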
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
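  // ST1x{2,3,4}post are the multi-register ST1 stores (consecutive registers,
  // no interleaving), again selected purely on the element type of the data
  // operand.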
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
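  // Lane-wise post-increment stores mirror the lane loads above: cases are
  // grouped by element size only, and SelectPostStoreLane emits the matching
  // STn i8/i16/i32/i64 post-indexed form.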
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }

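  // For the libm-style rounding nodes, SelectLIBM gets first refusal; if it
  // declines (returns null), we fall through to the generated matcher below.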
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}