//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
        ForCodeSize(false) {}

  const char *getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction()->optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);
  SDNode *SelectFPConvertWithRound(SDNode *N);

  SDNode *SelectReadRegister(SDNode *N);
  SDNode *SelectWriteRegister(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  SDNode *GenerateInexactFlagIfNeeded(const SDValue &In, unsigned InTyVariant,
                                      SDLoc DL);
};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
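/// For example, 0x123 and 0x123000 both match, producing the operands for
/// "add w0, w1, #0x123" and "add w0, w1, #0x123, lsl #12" respectively,
/// while a value such as 0x123456 is rejected.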
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
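/// This lets patterns rewrite an add/sub of a negative constant into the
/// opposite operation, e.g. selecting "sub w0, w1, #16" for (add w1, #-16).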
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth to fold V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Folding hurts if the value is used at least twice, unless we are
  // optimizing for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to the default of "LSL 0". The
/// logical instructions allow the shifted register to be rotated, but the
/// arithmetic instructions do not. The AllowROR parameter specifies whether
/// ROR is supported.
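/// For example, this folds (and x0, (srl x1, 4)) into "and x0, x0, x1, lsr #4"
/// and, when AllowROR is set, (or x0, (rotr x1, 3)) into
/// "orr x0, x0, x1, ror #3".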
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
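/// For example, (sext_inreg x, i8) maps to SXTB, (zext i16) to UXTH, and
/// (and x, 0xffffffff) to UXTW.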
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
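/// For example, this allows "mla v0.4h, v1.4h, v2.h[5]" to be emitted
/// directly instead of first extracting the high half of v2 into a separate
/// register.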
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
}

SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}

/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
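/// For example, (add x0, (shl (sext_inreg x1, i16), 2)) can be selected as
/// "add x0, x0, w1, sxth #2".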
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
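/// For example, if the low bits were folded into "ldr x0, [x1, :lo12:sym]"
/// while another user still needed "add x1, x1, :lo12:sym", both expansions
/// of the ADRP/ADD pseudo could end up materializing their own ADRP.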
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
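/// For example, with Size == 8 this accepts offsets that are multiples of 8
/// in [-512, 504], as in "ldp x0, x1, [x2, #-512]".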
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // addressing mode selected here doesn't support labels/immediates, only
  // base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= (-0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
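/// For example, with Size == 8 this accepts offsets that are multiples of 8
/// in [0, 32760], as in "ldr x0, [x1, #32760]".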
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getType()->getElementType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
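/// For example, "ldur x0, [x1, #-7]" covers offsets in [-256, 255] that the
/// scaled form cannot encode.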
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  if (isWorthFolding(N))
    return true;

  return false;
}

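// Select a base register plus an offset held in a 32-bit (W) register that is
// extended and optionally shifted, e.g. "ldr x0, [x1, w2, sxtw #3]".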
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
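// For example, 0xfff and 0x123000 are preferred (a single ADD suffices),
// while 0x1000 is not, since "mov x0, #0x1000" is a single MOVZ.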
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}

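// Select a base register plus an offset held in a 64-bit (X) register,
// optionally shifted by the access size, e.g. "ldr x0, [x1, x2, lsl #3]".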
bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. Instead it will use the [BaseReg + 0] address mode and generate
  // instructions like:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode. Also skip immediates that can be encoded by a single ADD
    // (SUB is also checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

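// For example, a createQTuple call on two vectors conceptually yields
//   REG_SEQUENCE QQRegClass, V0, qsub0, V1, qsub1
// tying the two Q registers together for a multi-vector instruction.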
SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

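// Select a pre- or post-indexed load, e.g. "ldr x0, [x1, #8]!" (pre-indexed)
// or "ldr x0, [x1], #8" (post-indexed), so the address update is folded into
// the load itself.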
SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));

  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
                                        unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                            unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}

SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}

namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
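/// For example, a v2i32 value becomes the low (dsub) half of a v4i32 value
/// whose upper half is undefined.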
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                            unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                              AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return Ld;
}

SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                                unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64), // Lane Number
                   N->getOperand(NumVecs + 2),          // Base register
                   N->getOperand(NumVecs + 3),          // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                                AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return Ld;
}

1348SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1349 unsigned Opc) {
1350 SDLoc dl(N);
1351 EVT VT = N->getOperand(2)->getValueType(0);
1352 bool Narrow = VT.getSizeInBits() == 64;
1353
1354 // Form a REG_SEQUENCE to force register allocation.
1355 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1356
1357 if (Narrow)
1358 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1359 WidenVector(*CurDAG));
1360
1361 SDValue RegSeq = createQTuple(Regs);
1362
1363 unsigned LaneNo =
1364 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1365
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001366 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
Benjamin Kramerea68a942015-02-19 15:26:17 +00001367 N->getOperand(NumVecs + 3), N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001368 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1369
1370 // Transfer memoperands.
1371 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1372 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1373 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1374
1375 return St;
1376}
1377
1378SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1379 unsigned Opc) {
1380 SDLoc dl(N);
1381 EVT VT = N->getOperand(2)->getValueType(0);
1382 bool Narrow = VT.getSizeInBits() == 64;
1383
1384 // Form a REG_SEQUENCE to force register allocation.
1385 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1386
1387 if (Narrow)
1388 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1389 WidenVector(*CurDAG));
1390
1391 SDValue RegSeq = createQTuple(Regs);
1392
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001393 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1394 MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001395
1396 unsigned LaneNo =
1397 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1398
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001399 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
Benjamin Kramerea68a942015-02-19 15:26:17 +00001400 N->getOperand(NumVecs + 2), // Base Register
1401 N->getOperand(NumVecs + 3), // Incremental
1402 N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001403 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1404
1405 // Transfer memoperands.
1406 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1407 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1408 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1409
1410 return St;
1411}
1412
1413static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1414 unsigned &Opc, SDValue &Opd0,
1415 unsigned &LSB, unsigned &MSB,
1416 unsigned NumberOfIgnoredLowBits,
1417 bool BiggerPattern) {
1418 assert(N->getOpcode() == ISD::AND &&
1419 "N must be a AND operation to call this function");
1420
1421 EVT VT = N->getValueType(0);
1422
1423 // Here we can test the type of VT and return false when the type does not
1424 // match, but since it is done prior to that call in the current context
1425 // we turned that into an assert to avoid redundant code.
1426 assert((VT == MVT::i32 || VT == MVT::i64) &&
1427 "Type checking must have been done before calling this function");
1428
1429 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1430 // changed the AND node to a 32-bit mask operation. We'll have to
1431 // undo that as part of the transform here if we want to catch all
1432 // the opportunities.
1433 // Currently the NumberOfIgnoredLowBits argument helps to recover
1434 // form these situations when matching bigger pattern (bitfield insert).
1435
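  // For example, (and (srl x, 4), 0xff) has a mask with eight trailing ones
  // and is selected below as UBFM x, #4, #11, i.e. ubfx x, #4, #8.
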
  // For unsigned extracts, check for a shift right and mask.
  uint64_t And_imm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that.
  And_imm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
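  // (e.g. 0x0f & 0x10 == 0, whereas 0x18 & 0x19 == 0x18 != 0).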
  if (And_imm & (And_imm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t Srl_imm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   Srl_imm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of the SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one,
    // plus it may expose more opportunities for the bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
                                  : countTrailingOnes<uint64_t>(And_imm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead
    // of the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}

static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern, which basically extracts several
  // contiguous bits from the source value and places them starting at the LSB
  // of the destination value; all other bits of the destination value are set
  // to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // where MaskImm >> ShiftImm determines the bit width.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, BitWide + Srl_imm - 1
  //

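  // For example, (srl (and x, 0x3f0), 4) extracts six bits: BitWide == 6,
  // and the node is selected as UBFM x, #4, #9, i.e. ubfx x, #4, #6.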
  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t And_mask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  // Check whether we really have a several-bits extract here.
  unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
  if (BitWide && isMask_64(And_mask >> Srl_imm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = Srl_imm;
    MSB = BitWide + Srl_imm - 1;
    return true;
  }

  return false;
}

static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we could test the type of VT and return false when the type does not
  // match, but since that check is done prior to this call in the current
  // context, we turned it into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing a several-bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
    return true;

  // We're looking for a shift of a shift.
  uint64_t Shl_imm = 0;
  uint64_t Trunc_bits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 can
    // be considered as setting the high 32 bits to zero. Our strategy here is
    // to always generate a 64-bit UBFM. This consistency will help the CSE
    // pass later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Missing combines/constant folding may have left us with strange
  // constants.
  if (Shl_imm >= VT.getSizeInBits()) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  int immr = Srl_imm - Shl_imm;
  Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
  Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1;
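  // For example, (sra (shl x, 24), 28) on i32 yields Immr == 4 and
  // Imms == 7, i.e. sbfx x, #4, #4.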
  // SRA requires a signed extraction.
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}

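/// isBitfieldExtractOp - Check whether N can be selected as a single
/// UBFM/SBFM bitfield extract, either because it already is one or via the
/// AND/SRL/SRA patterns above; on success, Opc, Opd0, Immr and Imms describe
/// the equivalent bitfield-move operands.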
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}

SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
  unsigned Opc, Immr, Imms;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
    return nullptr;

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // If the bitfield extract operation is 64-bit but the original type is
  // 32-bit, we need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
                       CurDAG->getTargetConstant(Imms, dl, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    MachineSDNode *Node =
        CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
                               SDValue(BFM, 0), SubReg);
    return Node;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
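/// For example, with VT == i32, DstMask == 0xffff0000 pairs with
/// BitsToBeInserted == 0x0000ffff: the two are disjoint and together cover
/// all 32 bits.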
static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}

// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7, and they live through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}

static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // Inherit the bitwidth value.
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result.
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument.
    OpUsefulBits = OpUsefulBits.shl(Imm);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    // The interesting part will be shifted in the result.
    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument.
    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.shl(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.lshr(ShiftAmt);
  } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
    // Shift Right
    // We do not handle AArch64_AM::ASR, because the sign will change the
    // number of useful bits.
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.lshr(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.shl(ShiftAmt);
  } else
    return;

  UsefulBits &= Mask;
}

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  if (Op.getOperand(1) == Orig)
    return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    UsefulBits &= ~OpUsefulBits;
    getUsefulBits(Op, UsefulBits, Depth + 1);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
    getUsefulBits(Op, UsefulBits, Depth + 1);
  }
}

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected.
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
  case AArch64::ANDWri:
  case AArch64::ANDXri:
    // We increment Depth only when we call getUsefulBits.
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::UBFMWri:
  case AArch64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::BFMWri:
  case AArch64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
  }
}

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits.
  if (!Depth) {
    unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
    // At the beginning, assume every produced bit is useful.
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits.
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point.
  UsefulBits &= UsersUsefulBits;
}

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
  if (ShlAmount == 0)
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;

  SDNode *ShiftNode;
  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
  } else {
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
  }

  return SDValue(ShiftNode, 0);
}

/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)"?
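/// For example, (and (shl x, 4), 0xff0) moves the low byte of x into bits
/// [4, 11]: Src == x, ShiftAmount == 4 and MaskWidth == 8.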
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  (void)BitWidth;
  assert(BitWidth == 32 || BitWidth == 64);

  APInt KnownZero, KnownOne;
  CurDAG->computeKnownBits(Op, KnownZero, KnownOne);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NonZeroBits = (~KnownZero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
    Op = Op.getOperand(0);
  }

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an extra
  // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
  // amount.
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);

  return true;
}

// Given an OR operation, check if we have the following pattern:
// ubfm c, b, imm, imm2 (or something that does the same job, see
//                       isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
// f = d | c
// If yes, the given reference arguments will be updated so that one can
// replace the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
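// For example, on i32, with c = ubfx b, #0, #8 and d = and e, 0xffffff00,
// f = d | c selects to bfxil e, b, #0, #8 (BFMWri with ImmR == 0, ImmS == 7).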
static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
                                     SDValue &Src, unsigned &ImmR,
                                     unsigned &ImmS, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  // Set Opc.
  EVT VT = N->getValueType(0);
  if (VT == MVT::i32)
    Opc = AArch64::BFMWri;
  else if (VT == MVT::i64)
    Opc = AArch64::BFMXri;
  else
    return false;

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.
  APInt UsefulBits;
  getUsefulBits(SDValue(N, 0), UsefulBits);

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // OR is commutative, so check both orderings. (Does LLVM provide a way to
  // do that directly, e.g., via the code matcher?)
  SDValue OrOpd1Val = N->getOperand(1);
  SDNode *OrOpd0 = N->getOperand(0).getNode();
  SDNode *OrOpd1 = N->getOperand(1).getNode();
  for (int i = 0; i < 2;
       ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, true)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S).
      if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion.
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion; we may
      // want to widen the pattern if we want to grab the general bitfield
      // move case.
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
                                       DstLSB, Width)) {
      ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern.
    EVT VT = OrOpd1->getValueType(0);
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the known-zero bits for the candidate first operand.
    // This allows us to catch more general cases than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proved it was useless.
    APInt KnownZero, KnownOne;
    CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);

    // Check if there is enough room for the second operand to appear
    // in the first one.
    APInt BitsToBeInserted =
        APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~KnownZero) != 0)
      continue;

    // Set the first operand.
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND.
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits,
      // or it is useful because it discards more bits.
      Dst = OrOpd1Val;

    // Both parts match.
    return true;
  }

  return false;
}

SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return nullptr;

  unsigned Opc;
  unsigned LSB, MSB;
  SDValue Opd0, Opd1;

  if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
    return nullptr;

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue Ops[] = { Opd0,
                    Opd1,
                    CurDAG->getTargetConstant(LSB, dl, VT),
                    CurDAG->getTargetConstant(MSB, dl, VT) };
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}

/// GenerateInexactFlagIfNeeded - Insert an FRINTX instruction to generate an
/// inexact signal on round-to-integer operations if needed. C11 leaves it
/// implementation-defined whether these operations trigger an inexact
/// exception. IEEE says they don't. Unfortunately, Darwin decided they do, so
/// we sometimes have to insert a special instruction just to set the right bit
/// in FPSR.
SDNode *AArch64DAGToDAGISel::GenerateInexactFlagIfNeeded(const SDValue &In,
                                                         unsigned InTyVariant,
                                                         SDLoc DL) {
  if (Subtarget->isTargetDarwin() && !TM.Options.UnsafeFPMath) {
    // Pick the right FRINTX using InTyVariant needed to set the flags.
    // InTyVariant is 0 for 32-bit and 1 for 64-bit.
    unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
    return CurDAG->getMachineNode(FRINTXOpcs[InTyVariant], DL,
                                  In.getValueType(), MVT::Glue, In);
  }
  return nullptr;
}

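/// SelectLIBM - Custom-select the libm rounding nodes (FCEIL, FFLOOR, FTRUNC,
/// FROUND) into the matching scalar FRINT[P,M,Z,A] instruction, gluing in the
/// FRINTX from GenerateInexactFlagIfNeeded when the target requires the
/// inexact flag.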
SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned Variant;
  unsigned Opc;

  if (VT == MVT::f32) {
    Variant = 0;
  } else if (VT == MVT::f64) {
    Variant = 1;
  } else
    return nullptr; // Unrecognized argument type. Fall back on default codegen.

  switch (N->getOpcode()) {
  default:
    return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
  case ISD::FCEIL: {
    unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
    Opc = FRINTPOpcs[Variant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
    Opc = FRINTMOpcs[Variant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
    Opc = FRINTZOpcs[Variant];
    break;
  }
  case ISD::FROUND: {
    unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
    Opc = FRINTAOpcs[Variant];
    break;
  }
  }

  SDLoc dl(N);
  SDValue In = N->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);

  if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, Variant, dl))
    Ops.push_back(SDValue(FRINTXNode, 1));

  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

/// SelectFPConvertWithRound - Try to combine FP rounding and
/// FP-INT conversion.
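/// For example, (i32 (fp_to_sint (fceil f32:$x))) selects to a single
/// FCVTPSUWSr (fcvtps w0, s0), possibly glued to an FRINTX on targets that
/// need the inexact flag, instead of separate frintp + fcvtzs instructions.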
SDNode *AArch64DAGToDAGISel::SelectFPConvertWithRound(SDNode *N) {
  SDNode *Op0 = N->getOperand(0).getNode();

  // Return if the round op is used by other nodes, as this would result in two
  // FRINTX, one each for the round and the convert.
  if (!Op0->hasOneUse())
    return nullptr;

  unsigned InTyVariant;
  EVT InTy = Op0->getValueType(0);
  if (InTy == MVT::f32)
    InTyVariant = 0;
  else if (InTy == MVT::f64)
    InTyVariant = 1;
  else
    return nullptr;

  unsigned OutTyVariant;
  EVT OutTy = N->getValueType(0);
  if (OutTy == MVT::i32)
    OutTyVariant = 0;
  else if (OutTy == MVT::i64)
    OutTyVariant = 1;
  else
    return nullptr;

  assert((N->getOpcode() == ISD::FP_TO_SINT
          || N->getOpcode() == ISD::FP_TO_UINT) && "Unexpected opcode!");
  unsigned FpConVariant = N->getOpcode() == ISD::FP_TO_SINT ? 0 : 1;

  unsigned Opc;
  switch (Op0->getOpcode()) {
  default:
    return nullptr;
  case ISD::FCEIL: {
    unsigned FCVTPOpcs[2][2][2] = {
        { { AArch64::FCVTPSUWSr, AArch64::FCVTPSUXSr },
          { AArch64::FCVTPSUWDr, AArch64::FCVTPSUXDr } },
        { { AArch64::FCVTPUUWSr, AArch64::FCVTPUUXSr },
          { AArch64::FCVTPUUWDr, AArch64::FCVTPUUXDr } } };
    Opc = FCVTPOpcs[FpConVariant][InTyVariant][OutTyVariant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FCVTMOpcs[2][2][2] = {
        { { AArch64::FCVTMSUWSr, AArch64::FCVTMSUXSr },
          { AArch64::FCVTMSUWDr, AArch64::FCVTMSUXDr } },
        { { AArch64::FCVTMUUWSr, AArch64::FCVTMUUXSr },
          { AArch64::FCVTMUUWDr, AArch64::FCVTMUUXDr } } };
    Opc = FCVTMOpcs[FpConVariant][InTyVariant][OutTyVariant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FCVTZOpcs[2][2][2] = {
        { { AArch64::FCVTZSUWSr, AArch64::FCVTZSUXSr },
          { AArch64::FCVTZSUWDr, AArch64::FCVTZSUXDr } },
        { { AArch64::FCVTZUUWSr, AArch64::FCVTZUUXSr },
          { AArch64::FCVTZUUWDr, AArch64::FCVTZUUXDr } } };
    Opc = FCVTZOpcs[FpConVariant][InTyVariant][OutTyVariant];
    break;
  }
  case ISD::FROUND: {
    unsigned FCVTAOpcs[2][2][2] = {
        { { AArch64::FCVTASUWSr, AArch64::FCVTASUXSr },
          { AArch64::FCVTASUWDr, AArch64::FCVTASUXDr } },
        { { AArch64::FCVTAUUWSr, AArch64::FCVTAUUXSr },
          { AArch64::FCVTAUUWDr, AArch64::FCVTAUUXDr } } };
    Opc = FCVTAOpcs[FpConVariant][InTyVariant][OutTyVariant];
    break;
  }
  }

  SDLoc DL(N);
  SDValue In = Op0->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);

  if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, InTyVariant, DL))
    Ops.push_back(SDValue(FRINTXNode, 1));

  return CurDAG->getMachineNode(Opc, DL, OutTy, Ops);
}

bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
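  // For example, if THIS_NODE is 16.0 and RegWidth is 32, FBits comes out
  // as 4, and the resulting conversion (e.g. fcvtzs w0, s0, #4) computes
  // round-to-zero(Val * 16).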
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
  return true;
}

// Inspects a register string of the form o0:op1:CRn:CRm:op2, extracts the
// fields of the string, obtains the integer values from them, and combines
// these into a single value to be used in the MRS/MSR instruction.
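// For example, the string "3:3:13:0:2" (TPIDR_EL0) combines to
// (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 == 0xDE82.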
static int getIntOperandFromRegisterString(StringRef RegString) {
  SmallVector<StringRef, 5> Fields;
  RegString.split(Fields, ":");

  if (Fields.size() == 1)
    return -1;

  assert(Fields.size() == 5
         && "Invalid number of fields in read register string");

  SmallVector<int, 5> Ops;
  bool AllIntFields = true;

  for (StringRef Field : Fields) {
    unsigned IntField;
    AllIntFields &= !Field.getAsInteger(10, IntField);
    Ops.push_back(IntField);
  }

  assert(AllIntFields &&
         "Unexpected non-integer value in special register string.");

  // Need to combine the integer fields of the string into a single value
  // based on the bit encoding of the MRS/MSR instruction.
  return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
         (Ops[3] << 3) | (Ops[4]);
}

// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1)
    return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
                                  MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(0));

  // Use the sysreg mapper to map the remaining possible strings to the
  // value for the register to be used for the instruction operand.
  AArch64SysReg::MRSMapper mapper;
  bool IsValidSpecialReg;
  Reg = mapper.fromString(RegString->getString(),
                          Subtarget->getFeatureBits(),
                          IsValidSpecialReg);
  if (IsValidSpecialReg)
    return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
                                  MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(0));

  return nullptr;
}

// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1)
    return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(2), N->getOperand(0));

  // Check if the register was one of those allowed as the pstatefield value
  // in the MSR (immediate) instruction. To accept the values allowed in the
  // pstatefield for the MSR (immediate) instruction, we also require that an
  // immediate value has been provided as an argument; we know this is the
  // case, as it has been ensured by semantic checking.
  AArch64PState::PStateMapper PMapper;
  bool IsValidSpecialReg;
  Reg = PMapper.fromString(RegString->getString(),
                           Subtarget->getFeatureBits(),
                           IsValidSpecialReg);
  if (IsValidSpecialReg) {
    assert (isa<ConstantSDNode>(N->getOperand(2))
            && "Expected a constant integer expression.");
    uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  CurDAG->getTargetConstant(Immed, DL, MVT::i16),
                                  N->getOperand(0));
  }

  // Use the sysreg mapper to attempt to map the remaining possible strings
  // to the value for the register to be used for the MSR (register)
  // instruction operand.
  AArch64SysReg::MSRMapper Mapper;
  Reg = Mapper.fromString(RegString->getString(),
                          Subtarget->getFeatureBits(),
                          IsValidSpecialReg);

  if (IsValidSpecialReg)
    return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(2), N->getOperand(0));

  return nullptr;
}

SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected.
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return nullptr;
  }

  // A few custom selection cases.
  SDNode *ResNode = nullptr;
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::READ_REGISTER:
    if (SDNode *Res = SelectReadRegister(Node))
      return Res;
    break;

  case ISD::WRITE_REGISTER:
    if (SDNode *Res = SelectWriteRegister(Node))
      return Res;
    break;

  case ISD::ADD:
    if (SDNode *I = SelectMLAV64LaneV128(Node))
      return I;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
    if (Done)
      return I;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
    if (SDNode *I = SelectBitfieldExtractOp(Node))
      return I;
    break;

  case ISD::OR:
    if (SDNode *I = SelectBitfieldInsertOp(Node))
      return I;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      llvm_unreachable("Unexpected vector element type!");
    case 64:
      SubReg = AArch64::dsub;
      break;
    case 32:
      SubReg = AArch64::ssub;
      break;
    case 16:
      SubReg = AArch64::hsub;
      break;
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    return Extract.getNode();
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      AArch64::WZR, MVT::i32).getNode();
      else if (VT == MVT::i64)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      AArch64::XZR, MVT::i64).getNode();
    }
    break;
  }

  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    SDLoc DL(Node);
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
    return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
  }
2509 case ISD::INTRINSIC_W_CHAIN: {
2510 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2511 switch (IntNo) {
2512 default:
2513 break;
2514 case Intrinsic::aarch64_ldaxp:
2515 case Intrinsic::aarch64_ldxp: {
2516 unsigned Op =
2517 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2518 SDValue MemAddr = Node->getOperand(2);
2519 SDLoc DL(Node);
2520 SDValue Chain = Node->getOperand(0);
2521
2522 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2523 MVT::Other, MemAddr, Chain);
2524
2525 // Transfer memoperands.
2526 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2527 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2528 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2529 return Ld;
2530 }
2531 case Intrinsic::aarch64_stlxp:
2532 case Intrinsic::aarch64_stxp: {
2533 unsigned Op =
2534 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2535 SDLoc DL(Node);
2536 SDValue Chain = Node->getOperand(0);
2537 SDValue ValLo = Node->getOperand(2);
2538 SDValue ValHi = Node->getOperand(3);
2539 SDValue MemAddr = Node->getOperand(4);
2540
2541 // Place arguments in the right order.
Benjamin Kramerea68a942015-02-19 15:26:17 +00002542 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00002543
2544 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2545 // Transfer memoperands.
2546 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2547 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2548 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2549
2550 return St;
2551 }
2552 case Intrinsic::aarch64_neon_ld1x2:
2553 if (VT == MVT::v8i8)
2554 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2555 else if (VT == MVT::v16i8)
2556 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002557 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002558 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002559 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002560 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2561 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2562 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2563 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2564 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2565 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2566 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2567 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2568 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2569 break;
2570 case Intrinsic::aarch64_neon_ld1x3:
2571 if (VT == MVT::v8i8)
2572 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2573 else if (VT == MVT::v16i8)
2574 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002575 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002576 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002577 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002578 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2579 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2580 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2581 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2582 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2583 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2584 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2585 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2586 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2587 break;
2588 case Intrinsic::aarch64_neon_ld1x4:
2589 if (VT == MVT::v8i8)
2590 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2591 else if (VT == MVT::v16i8)
2592 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002593 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002594 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002595 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002596 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2597 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2598 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2599 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2600 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2601 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2602 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2603 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2604 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2605 break;
2606 case Intrinsic::aarch64_neon_ld2:
2607 if (VT == MVT::v8i8)
2608 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2609 else if (VT == MVT::v16i8)
2610 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002611 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002612 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002613 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002614 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2615 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2616 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2617 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2618 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2619 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2620 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2621 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2622 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2623 break;
2624 case Intrinsic::aarch64_neon_ld3:
2625 if (VT == MVT::v8i8)
2626 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2627 else if (VT == MVT::v16i8)
2628 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002629 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002630 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002631 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002632 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2633 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2634 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2635 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2636 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2637 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2638 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2639 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2640 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2641 break;
2642 case Intrinsic::aarch64_neon_ld4:
2643 if (VT == MVT::v8i8)
2644 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2645 else if (VT == MVT::v16i8)
2646 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002647 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002648 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002649 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002650 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2651 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2652 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2653 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2654 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2655 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2656 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2657 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2658 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2659 break;
    case Intrinsic::aarch64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
      break;
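    // The lane forms transfer a single structure to/from one lane of each
    // vector, so selection depends only on the element width: integer and FP
    // types of the same size share an opcode (e.g. LD2i16 covers v8i16,
    // v4i16, v4f16 and v8f16).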
    case Intrinsic::aarch64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, AArch64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 2, AArch64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, AArch64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, AArch64::LD2i64);
      break;
    case Intrinsic::aarch64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, AArch64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 3, AArch64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, AArch64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, AArch64::LD3i64);
      break;
    case Intrinsic::aarch64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, AArch64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 4, AArch64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, AArch64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, AArch64::LD4i64);
      break;
    }
  } break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
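    // TBL reads only from the table registers; TBX (signalled by the final
    // boolean) additionally preserves destination lanes whose index is out
    // of range.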
    case Intrinsic::aarch64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
                                                  : AArch64::TBLv16i8Two,
                         false);
    case Intrinsic::aarch64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                                  : AArch64::TBLv16i8Three,
                         false);
    case Intrinsic::aarch64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                                  : AArch64::TBLv16i8Four,
                         false);
    case Intrinsic::aarch64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
                                                  : AArch64::TBXv16i8Two,
                         true);
    case Intrinsic::aarch64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                                  : AArch64::TBXv16i8Three,
                         true);
    case Intrinsic::aarch64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                                  : AArch64::TBXv16i8Four,
                         true);
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
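    // Store intrinsics produce no result, so the value type of interest is
    // that of the first data operand, when one is present.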
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, AArch64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, AArch64::ST1Twov16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 2, AArch64::ST1Twov4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 2, AArch64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, AArch64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, AArch64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, AArch64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, AArch64::ST1Twov1d);
      break;
    }
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, AArch64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, AArch64::ST1Threev16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 3, AArch64::ST1Threev4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 3, AArch64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, AArch64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, AArch64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, AArch64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, AArch64::ST1Threev1d);
      break;
    }
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, AArch64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, AArch64::ST1Fourv16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 4, AArch64::ST1Fourv4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 4, AArch64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, AArch64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, AArch64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
      break;
    }
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, AArch64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, AArch64::ST2Twov16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 2, AArch64::ST2Twov4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 2, AArch64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, AArch64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, AArch64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, AArch64::ST2Twov2d);
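      // As with the structured loads, ST2..ST4 lack a "1d" arrangement, so
      // single-element structured stores fall back to multi-register ST1.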
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, AArch64::ST1Twov1d);
      break;
    }
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, AArch64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, AArch64::ST3Threev16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 3, AArch64::ST3Threev4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 3, AArch64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, AArch64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, AArch64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, AArch64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, AArch64::ST1Threev1d);
      break;
    }
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, AArch64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, AArch64::ST4Fourv16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 4, AArch64::ST4Fourv4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 4, AArch64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, AArch64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, AArch64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, AArch64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
      break;
    }
    case Intrinsic::aarch64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, AArch64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 2, AArch64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, AArch64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, AArch64::ST2i64);
      break;
    }
    case Intrinsic::aarch64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, AArch64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 3, AArch64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, AArch64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, AArch64::ST3i64);
      break;
    }
    case Intrinsic::aarch64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, AArch64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 4, AArch64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, AArch64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, AArch64::ST4i64);
      break;
    }
    }
    break;
  }
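  // The *post opcodes below are target-specific nodes formed during DAG
  // combining when a structured memory access is paired with a matching
  // post-increment of its base pointer; they map onto the writeback (_POST)
  // instruction forms.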
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
    break;
  }
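  // Store nodes produce only a chain, so for the post-indexed stores the
  // vector type must be recovered from the stored value (operand 1).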
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }

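  // Give the FP rounding and round-then-convert nodes a chance at dedicated
  // instruction selection before falling through to the generated matcher.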
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (SDNode *I = SelectFPConvertWithRound(Node))
      return I;
    break;
  }

  // Select the default instruction via the TableGen-generated matcher.
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
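
// A minimal sketch of how this selector is typically installed, assuming the
// usual TargetPassConfig hook in AArch64TargetMachine.cpp (the hook shown
// here is illustrative, not copied from that file):
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     return false;
//   }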