//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
        ForCodeSize(false) {}

  const char *getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction()->optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);
  SDNode *SelectFPConvertWithRound(SDNode *N);

  SDNode *SelectReadRegister(SDNode *N);
  SDNode *SelectWriteRegister(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  SDNode *GenerateInexactFlagIfNeeded(const SDValue &In, unsigned InTyVariant,
                                      SDLoc DL);
};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the (zero-extended) 64-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node has a specific
// opcode and that its right operand is an immediate integer.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in. However,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}
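
// Illustrative sketch (not part of the selection logic): for N = 0xabc000,
// the low 12 bits are clear and the value fits in 24 bits, so the match
// above produces Val = 0xabc and Shift = LSL #12, i.e. an instruction of
// the form:
//   add x0, x1, #0xabc, lsl #12
// A value such as 0xabc001 fails both checks and is rejected.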

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in. However,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}
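
// Illustrative sketch: this is what lets a subtraction or comparison against
// a negative constant use the complementary instruction. For an i32 operand
// of -5, Immed = 0xFFFFFFFB negates to 5, which SelectArithImmed accepts
// with Shift = LSL #0, e.g. allowing "cmp w0, #-5" to be emitted as
//   cmn w0, #5
// Immed == 0 is rejected above precisely because cmp/cmn with #0 disagree
// on the C flag.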

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to the default of "LSL #0". The
/// logical instructions allow the shifted register to be rotated, but the
/// arithmetic instructions do not. The AllowROR parameter specifies whether
/// ROR is supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
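
// Illustrative sketch: given the DAG (or x0, (shl x1, 4)), this folds the
// shift into the logical instruction's shifted-register operand, selecting
//   orr x0, x0, x1, lsl #4
// For an arithmetic node such as add, AllowROR is false, so a rotate feeding
// the add is left as a separate ROR instruction.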

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}
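
// Illustrative mapping (a sketch of the cases above): (sext_inreg x, i16)
// yields SXTH and (and x, 0xFF) yields UXTB when used as an arithmetic
// operand. For load/store addressing (IsLoadStore == true) only the 32-bit
// forms survive, since the register-offset modes only offer UXTW/SXTW
// extends of a W register.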

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand
// is a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
}

SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}

/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}
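
// Illustrative sketch: for a DAG shaped like (add x0, (shl (sext i16 from
// w1), 2)), both the extend and the shift fold into the operand, giving
//   add x0, x0, w1, sxth #2
// Shift amounts above 4 are rejected since the extended-register form only
// encodes a left shift of 0-4.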

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit
  // signed addressing mode selected here doesn't support labels/immediates,
  // only base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
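
// Illustrative range check (a sketch, not normative): for Size = 8 the scale
// is 3, so the accepted byte offsets are multiples of 8 in [-512, 504],
// which matches the signed 7-bit scaled immediate of LDP/STP of X registers:
//   stp x1, x2, [x0, #-512]
// A byte offset of 8 is emitted as OffImm = 1 (8 >> 3).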

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getType()->getElementType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
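
// Illustrative range check (a sketch): for Size = 4 the scale is 2, so byte
// offsets that are multiples of 4 in [0, 16380] are accepted, e.g.
//   ldr w0, [x1, #16380]
// corresponds to the maximum encodable OffImm of 0xfff (16380 >> 2).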

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}
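
// Illustrative sketch: a misaligned or negative offset such as -3 cannot use
// the scaled form, but fits the signed 9-bit unscaled immediate, so it is
// selected as
//   ldur w0, [x1, #-3]
// Offsets outside [-256, 255] fall through to the base-only path above.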

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  if (isWorthFolding(N))
    return true;

  return false;
}

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}
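
// Illustrative sketch: for (add x0, (shl (sext w1), 3)) feeding an 8-byte
// load, the extend and shift fold into the W-register-offset mode:
//   ldr x2, [x0, w1, sxtw #3]
// with Base = x0, Offset = w1, SignExtend = 1 and DoShift = 1.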

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}
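
// Worked examples (illustrative): 0x123000 spans bits [12,23] and is not a
// single 16-bit MOVZ chunk, so it is preferred as "add Xd, Xn, #0x123, lsl
// #12" and the function returns true. By contrast, 0xab0000 occupies bits
// [16,23] only, so "movz Xd, #0xab, lsl #16" is cheaper and the function
// returns false for it.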

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. In that case the default lowering uses [BaseReg + 0] and
  // generates instructions like:
  //   MOV  X0, WideImmediate
  //   ADD  X1, BaseReg, X0
  //   LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //   MOV  X0, WideImmediate
  //   LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode, and also immediates that can be encoded by a single ADD
    // (SUB is also checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
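
// Illustrative sketch: createQTuple({q0, q1, q2}) builds the machine node
//   REG_SEQUENCE QQQRegClassID, q0, qsub0, q1, qsub1, q2, qsub2
// i.e. the operand list is the register class (Regs.size() - 2 indexes QQQ
// for three registers) followed by (value, subregister-index) pairs.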

SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));

  return nullptr;
}
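
// Illustrative sketch: a pre-indexed i64 load with offset 8 becomes
//   ldr x0, [x1, #8]!
// (LDRXpre), producing both the loaded value and the updated base register;
// the post-indexed form "ldr x0, [x1], #8" (LDRXpost) is chosen when AM is a
// post-increment/decrement mode. For a zero-extending i8 load into i64, the
// i32 LDRBB result is wrapped in SUBREG_TO_REG as done above.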

SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
                                        unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
        CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                            unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
          CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}

SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}
1229
Benjamin Kramer51f6096c2015-03-23 12:30:58 +00001230namespace {
Tim Northover3b0846e2014-05-24 12:50:23 +00001231/// WidenVector - Given a value in the V64 register class, produce the
1232/// equivalent value in the V128 register class.
1233class WidenVector {
1234 SelectionDAG &DAG;
1235
1236public:
1237 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1238
1239 SDValue operator()(SDValue V64Reg) {
1240 EVT VT = V64Reg.getValueType();
1241 unsigned NarrowSize = VT.getVectorNumElements();
1242 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1243 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1244 SDLoc DL(V64Reg);
1245
1246 SDValue Undef =
1247 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1248 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1249 }
1250};
Benjamin Kramer51f6096c2015-03-23 12:30:58 +00001251} // namespace
Tim Northover3b0846e2014-05-24 12:50:23 +00001252
1253/// NarrowVector - Given a value in the V128 register class, produce the
1254/// equivalent value in the V64 register class.
1255static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1256 EVT VT = V128Reg.getValueType();
1257 unsigned WideSize = VT.getVectorNumElements();
1258 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1259 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1260
1261 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1262 V128Reg);
1263}

SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                            unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                              AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return Ld;
}

SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                                unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64),  // Lane Number
                   N->getOperand(NumVecs + 2),           // Base register
                   N->getOperand(NumVecs + 3),           // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                                AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return Ld;
}

SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}

SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                                 unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
Benjamin Kramerea68a942015-02-19 15:26:17 +00001413 N->getOperand(NumVecs + 2), // Base Register
1414 N->getOperand(NumVecs + 3), // Incremental
1415 N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001416 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1417
1418 // Transfer memoperands.
1419 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1420 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1421 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1422
1423 return St;
1424}
1425
1426static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1427 unsigned &Opc, SDValue &Opd0,
1428 unsigned &LSB, unsigned &MSB,
1429 unsigned NumberOfIgnoredLowBits,
1430 bool BiggerPattern) {
1431 assert(N->getOpcode() == ISD::AND &&
1432 "N must be a AND operation to call this function");
1433
1434 EVT VT = N->getValueType(0);
1435
1436 // We could test the type of VT and return false when it does not match, but
1437 // since that check is done prior to this call in the current context, we
1438 // turned it into an assert to avoid redundant code.
1439 assert((VT == MVT::i32 || VT == MVT::i64) &&
1440 "Type checking must have been done before calling this function");
1441
1442 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1443 // changed the AND node to a 32-bit mask operation. We'll have to
1444 // undo that as part of the transform here if we want to catch all
1445 // the opportunities.
1446 // Currently the NumberOfIgnoredLowBits argument helps to recover
1447 // from these situations when matching a bigger pattern (bitfield insert).
1448
1449 // For unsigned extracts, check for a shift right and mask
1450 uint64_t And_imm = 0;
1451 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1452 return false;
1453
1454 const SDNode *Op0 = N->getOperand(0).getNode();
1455
1456 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1457 // simplified. Try to undo that.
1458 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1459
1460 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
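 // E.g. (illustrative): 0x00ff & 0x0100 == 0, so 0x00ff is a low-bit mask;
 // 0x0ff0 & 0x0ff1 == 0x0ff0 != 0, so 0x0ff0 is rejected.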
1461 if (And_imm & (And_imm + 1))
1462 return false;
1463
1464 bool ClampMSB = false;
1465 uint64_t Srl_imm = 0;
1466 // Handle the SRL + ANY_EXTEND case.
1467 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1468 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1469 // Extend the incoming operand of the SRL to 64-bit.
1470 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1471 // Make sure to clamp the MSB so that we preserve the semantics of the
1472 // original operations.
1473 ClampMSB = true;
1474 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1475 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1476 Srl_imm)) {
1477 // If the shift result was truncated, we can still combine them.
1478 Opd0 = Op0->getOperand(0).getOperand(0);
1479
1480 // Use the type of SRL node.
1481 VT = Opd0->getValueType(0);
1482 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1483 Opd0 = Op0->getOperand(0);
1484 } else if (BiggerPattern) {
1485 // Let's pretend a 0 shift right has been performed.
1486 // The resulting code will be at least as good as the original one
1487 // plus it may expose more opportunities for bitfield insert pattern.
1488 // FIXME: Currently we limit this to the bigger pattern, because
Chad Rosier6c1f0932015-09-17 13:10:27 +00001489 // some optimizations expect AND and not UBFM.
Tim Northover3b0846e2014-05-24 12:50:23 +00001490 Opd0 = N->getOperand(0);
1491 } else
1492 return false;
1493
Matthias Braun75260352015-02-24 18:52:04 +00001494 // Bail out on large immediates. This happens when no proper
1495 // combining/constant folding was performed.
Matthias Braun02892ec2015-02-25 18:03:50 +00001496 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1497 DEBUG((dbgs() << N
1498 << ": Found large shift immediate, this should not happen\n"));
Matthias Braun75260352015-02-24 18:52:04 +00001499 return false;
Matthias Braun02892ec2015-02-25 18:03:50 +00001500 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001501
1502 LSB = Srl_imm;
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001503 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1504 : countTrailingOnes<uint64_t>(And_imm)) -
Tim Northover3b0846e2014-05-24 12:50:23 +00001505 1;
1506 if (ClampMSB)
1507 // Since we're moving the extend before the right shift operation, we need
1508 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1509 // the zeros which would get shifted in with the original right shift
1510 // operation.
1511 MSB = MSB > 31 ? 31 : MSB;
1512
1513 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1514 return true;
1515}
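// Worked example (illustrative, not from the original source): for the i32
// node (and (srl x, 3), 0x1f), And_imm = 0x1f passes the low-bit-mask check
// and Srl_imm = 3, so LSB = 3 and MSB = 3 + countTrailingOnes(0x1f) - 1 = 7,
// selecting UBFMWri x, 3, 7 (an extract of bits [7:3] of x).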
1516
David Xu052b9d92014-09-02 09:33:56 +00001517static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1518 SDValue &Opd0, unsigned &LSB,
1519 unsigned &MSB) {
1520 // We are looking for the following pattern, which extracts several
1521 // contiguous bits from the source value and places them at the LSB of the
1522 // destination value; all other bits of the destination value are set to zero:
Tim Northover3b0846e2014-05-24 12:50:23 +00001523 //
1524 // Value2 = AND Value, MaskImm
1525 // SRL Value2, ShiftImm
1526 //
David Xu052b9d92014-09-02 09:33:56 +00001527 // where MaskImm >> ShiftImm determines the extracted bit width.
Tim Northover3b0846e2014-05-24 12:50:23 +00001528 //
1529 // This gets selected into a single UBFM:
1530 //
David Xu052b9d92014-09-02 09:33:56 +00001531 // UBFM Value, ShiftImm, BitWide + ShiftImm - 1
Tim Northover3b0846e2014-05-24 12:50:23 +00001532 //
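 // For example (illustrative), with MaskImm = 0xff0 and ShiftImm = 4,
 // BitWide = 8 and the pattern selects to UBFM Value, 4, 11.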
1533
1534 if (N->getOpcode() != ISD::SRL)
1535 return false;
1536
1537 uint64_t And_mask = 0;
1538 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1539 return false;
1540
1541 Opd0 = N->getOperand(0).getOperand(0);
1542
1543 uint64_t Srl_imm = 0;
1544 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1545 return false;
1546
David Xu052b9d92014-09-02 09:33:56 +00001547 // Check whether we really have a multi-bit extract here.
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001548 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
David Xu052b9d92014-09-02 09:33:56 +00001549 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001550 if (N->getValueType(0) == MVT::i32)
1551 Opc = AArch64::UBFMWri;
1552 else
1553 Opc = AArch64::UBFMXri;
1554
David Xu052b9d92014-09-02 09:33:56 +00001555 LSB = Srl_imm;
1556 MSB = BitWide + Srl_imm - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001557 return true;
1558 }
1559
1560 return false;
1561}
1562
1563static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001564 unsigned &Immr, unsigned &Imms,
Tim Northover3b0846e2014-05-24 12:50:23 +00001565 bool BiggerPattern) {
1566 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1567 "N must be a SHR/SRA operation to call this function");
1568
1569 EVT VT = N->getValueType(0);
1570
1571 // We could test the type of VT and return false when it does not match, but
1572 // since that check is done prior to this call in the current context, we
1573 // turned it into an assert to avoid redundant code.
1574 assert((VT == MVT::i32 || VT == MVT::i64) &&
1575 "Type checking must have been done before calling this function");
1576
David Xu052b9d92014-09-02 09:33:56 +00001577 // Check for AND + SRL doing several bits extract.
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001578 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
Tim Northover3b0846e2014-05-24 12:50:23 +00001579 return true;
1580
1581 // We're looking for a shift of a shift.
1582 uint64_t Shl_imm = 0;
1583 uint64_t Trunc_bits = 0;
1584 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1585 Opd0 = N->getOperand(0).getOperand(0);
1586 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1587 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1588 // We are looking for a shift of a truncate. A truncate from i64 to i32 can
1589 // be considered as setting the high 32 bits to zero. Our strategy here is to
1590 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1591 // later find more redundancy.
1592 Opd0 = N->getOperand(0).getOperand(0);
1593 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1594 VT = Opd0->getValueType(0);
1595 assert(VT == MVT::i64 && "the promoted type should be i64");
1596 } else if (BiggerPattern) {
1597 // Let's pretend a 0 shift left has been performed.
1598 // FIXME: Currently we limit this to the bigger pattern case,
1599 // because some optimizations expect AND and not UBFM.
1600 Opd0 = N->getOperand(0);
1601 } else
1602 return false;
1603
Matthias Braun75260352015-02-24 18:52:04 +00001604 // Missing combines/constant folding may have left us with strange
1605 // constants.
Matthias Braun02892ec2015-02-25 18:03:50 +00001606 if (Shl_imm >= VT.getSizeInBits()) {
1607 DEBUG((dbgs() << N
1608 << ": Found large shift immediate, this should not happen\n"));
Matthias Braun75260352015-02-24 18:52:04 +00001609 return false;
Matthias Braun02892ec2015-02-25 18:03:50 +00001610 }
Matthias Braun75260352015-02-24 18:52:04 +00001611
Tim Northover3b0846e2014-05-24 12:50:23 +00001612 uint64_t Srl_imm = 0;
1613 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1614 return false;
1615
1616 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1617 "bad amount in shift node!");
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001618 int immr = Srl_imm - Shl_imm;
1619 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
1620 Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001621 // SRA requires a signed extraction
1622 if (VT == MVT::i32)
1623 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1624 else
1625 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1626 return true;
1627}
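// Worked example (illustrative, not from the original source): for the i32
// node (sra (shl x, 24), 24), Shl_imm = 24 and the SRA amount is 24, so
// Immr = 24 - 24 = 0 and Imms = 32 - 24 - 1 = 7, selecting SBFMWri x, 0, 7,
// i.e. a sign extension of the low byte of x.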
1628
1629static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001630 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
Tim Northover3b0846e2014-05-24 12:50:23 +00001631 unsigned NumberOfIgnoredLowBits = 0,
1632 bool BiggerPattern = false) {
1633 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1634 return false;
1635
1636 switch (N->getOpcode()) {
1637 default:
1638 if (!N->isMachineOpcode())
1639 return false;
1640 break;
1641 case ISD::AND:
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001642 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
Tim Northover3b0846e2014-05-24 12:50:23 +00001643 NumberOfIgnoredLowBits, BiggerPattern);
1644 case ISD::SRL:
1645 case ISD::SRA:
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001646 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
Tim Northover3b0846e2014-05-24 12:50:23 +00001647 }
1648
1649 unsigned NOpc = N->getMachineOpcode();
1650 switch (NOpc) {
1651 default:
1652 return false;
1653 case AArch64::SBFMWri:
1654 case AArch64::UBFMWri:
1655 case AArch64::SBFMXri:
1656 case AArch64::UBFMXri:
1657 Opc = NOpc;
1658 Opd0 = N->getOperand(0);
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001659 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1660 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
Tim Northover3b0846e2014-05-24 12:50:23 +00001661 return true;
1662 }
1663 // Unreachable
1664 return false;
1665}
1666
1667SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001668 unsigned Opc, Immr, Imms;
Tim Northover3b0846e2014-05-24 12:50:23 +00001669 SDValue Opd0;
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001670 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
Tim Northover3b0846e2014-05-24 12:50:23 +00001671 return nullptr;
1672
1673 EVT VT = N->getValueType(0);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001674 SDLoc dl(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00001675
1676 // If the bit extract operation is 64bit but the original type is 32bit, we
1677 // need to add one EXTRACT_SUBREG.
1678 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001679 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
1680 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001681
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001682 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1683 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
Tim Northover3b0846e2014-05-24 12:50:23 +00001684 MachineSDNode *Node =
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001685 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
Tim Northover3b0846e2014-05-24 12:50:23 +00001686 SDValue(BFM, 0), SubReg);
1687 return Node;
1688 }
1689
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001690 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1691 CurDAG->getTargetConstant(Imms, dl, VT)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001692 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1693}
1694
1695/// Does DstMask form a complementary pair with the mask provided by
1696/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
1697/// this asks whether DstMask zeroes precisely those bits that will be set by
1698/// the other half.
1699static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1700 unsigned NumberOfIgnoredHighBits, EVT VT) {
1701 assert((VT == MVT::i32 || VT == MVT::i64) &&
1702 "i32 or i64 mask type expected!");
1703 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1704
1705 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1706 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1707
1708 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1709 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1710}
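// Worked example (illustrative): for VT = i32 with no ignored high bits,
// DstMask = 0xffff0000 and BitsToBeInserted = 0x0000ffff form a complementary
// pair: their AND is 0 and their OR is all ones, so the mask qualifies.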
1711
1712// Look for bits that will be useful for later uses.
1713 // A bit is considered useless as soon as it is dropped and never used
1714 // before it has been dropped.
1715// E.g., looking for useful bit of x
1716// 1. y = x & 0x7
1717// 2. z = y >> 2
1718 // After #1, the useful bits of x are 0x7; they then live through y.
1719 // After #2, the useful bits of x are 0x4.
1720 // However, if x is used by an unpredictable instruction, then all its bits
1722// are useful.
1723// E.g.
1724// 1. y = x & 0x7
1725// 2. z = y >> 2
1726// 3. str x, [@x]
1727static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1728
1729static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1730 unsigned Depth) {
1731 uint64_t Imm =
1732 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1733 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1734 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1735 getUsefulBits(Op, UsefulBits, Depth + 1);
1736}
1737
1738static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1739 uint64_t Imm, uint64_t MSB,
1740 unsigned Depth) {
1741 // Inherit the bit width value.
1742 APInt OpUsefulBits(UsefulBits);
1743 OpUsefulBits = 1;
1744
1745 if (MSB >= Imm) {
1746 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1747 --OpUsefulBits;
1748 // The interesting part will be in the lower part of the result
1749 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1750 // The interesting part was starting at Imm in the argument
1751 OpUsefulBits = OpUsefulBits.shl(Imm);
1752 } else {
1753 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1754 --OpUsefulBits;
1755 // The interesting part will be shifted in the result
1756 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1757 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1758 // The interesting part was at zero in the argument
1759 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1760 }
1761
1762 UsefulBits &= OpUsefulBits;
1763}
1764
1765static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1766 unsigned Depth) {
1767 uint64_t Imm =
1768 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1769 uint64_t MSB =
1770 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1771
1772 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1773}
1774
1775static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1776 unsigned Depth) {
1777 uint64_t ShiftTypeAndValue =
1778 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1779 APInt Mask(UsefulBits);
1780 Mask.clearAllBits();
1781 Mask.flipAllBits();
1782
1783 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1784 // Shift Left
1785 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1786 Mask = Mask.shl(ShiftAmt);
1787 getUsefulBits(Op, Mask, Depth + 1);
1788 Mask = Mask.lshr(ShiftAmt);
1789 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1790 // Shift Right
1791 // We do not handle AArch64_AM::ASR, because the sign will change the
1792 // number of useful bits
1793 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1794 Mask = Mask.lshr(ShiftAmt);
1795 getUsefulBits(Op, Mask, Depth + 1);
1796 Mask = Mask.shl(ShiftAmt);
1797 } else
1798 return;
1799
1800 UsefulBits &= Mask;
1801}
1802
1803static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1804 unsigned Depth) {
1805 uint64_t Imm =
1806 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1807 uint64_t MSB =
1808 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1809
1810 if (Op.getOperand(1) == Orig)
1811 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1812
1813 APInt OpUsefulBits(UsefulBits);
1814 OpUsefulBits = 1;
1815
1816 if (MSB >= Imm) {
1817 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1818 --OpUsefulBits;
1819 UsefulBits &= ~OpUsefulBits;
1820 getUsefulBits(Op, UsefulBits, Depth + 1);
1821 } else {
1822 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1823 --OpUsefulBits;
1824 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1825 getUsefulBits(Op, UsefulBits, Depth + 1);
1826 }
1827}
1828
1829static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1830 SDValue Orig, unsigned Depth) {
1831
1832 // Users of this node should have already been instruction selected
1833 // FIXME: Can we turn that into an assert?
1834 if (!UserNode->isMachineOpcode())
1835 return;
1836
1837 switch (UserNode->getMachineOpcode()) {
1838 default:
1839 return;
1840 case AArch64::ANDSWri:
1841 case AArch64::ANDSXri:
1842 case AArch64::ANDWri:
1843 case AArch64::ANDXri:
1844 // We increment Depth only when we call the getUsefulBits
1845 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1846 Depth);
1847 case AArch64::UBFMWri:
1848 case AArch64::UBFMXri:
1849 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1850
1851 case AArch64::ORRWrs:
1852 case AArch64::ORRXrs:
1853 if (UserNode->getOperand(1) != Orig)
1854 return;
1855 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1856 Depth);
1857 case AArch64::BFMWri:
1858 case AArch64::BFMXri:
1859 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1860 }
1861}
1862
1863static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1864 if (Depth >= 6)
1865 return;
1866 // Initialize UsefulBits
1867 if (!Depth) {
1868 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1869 // At the beginning, assume every produced bit is useful.
1870 UsefulBits = APInt(Bitwidth, 0);
1871 UsefulBits.flipAllBits();
1872 }
1873 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1874
1875 for (SDNode *Node : Op.getNode()->uses()) {
1876 // A use cannot produce useful bits
1877 APInt UsefulBitsForUse = APInt(UsefulBits);
1878 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1879 UsersUsefulBits |= UsefulBitsForUse;
1880 }
1881 // UsefulBits contains the produced bits that are meaningful for the
1882 // current definition; thus a user cannot make a bit meaningful at
1883 // this point
1884 UsefulBits &= UsersUsefulBits;
1885}
1886
1887/// Create a machine node performing a notional SHL of Op by ShlAmount. If
1888/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1889/// 0, return Op unchanged.
1890static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1891 if (ShlAmount == 0)
1892 return Op;
1893
1894 EVT VT = Op.getValueType();
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001895 SDLoc dl(Op);
Tim Northover3b0846e2014-05-24 12:50:23 +00001896 unsigned BitWidth = VT.getSizeInBits();
1897 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1898
1899 SDNode *ShiftNode;
1900 if (ShlAmount > 0) {
1901 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
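 // E.g. (illustrative) for BitWidth == 32 and ShlAmount == 4:
 // LSL wD, wN, #4 == UBFM wD, wN, #28, #27.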
1902 ShiftNode = CurDAG->getMachineNode(
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001903 UBFMOpc, dl, VT, Op,
1904 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
1905 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
Tim Northover3b0846e2014-05-24 12:50:23 +00001906 } else {
1907 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1908 assert(ShlAmount < 0 && "expected right shift");
1909 int ShrAmount = -ShlAmount;
1910 ShiftNode = CurDAG->getMachineNode(
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001911 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
1912 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
Tim Northover3b0846e2014-05-24 12:50:23 +00001913 }
1914
1915 return SDValue(ShiftNode, 0);
1916}
1917
1918/// Does this tree qualify as an attempt to move a bitfield into position,
1919/// essentially "(and (shl VAL, N), Mask)".
1920static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1921 SDValue &Src, int &ShiftAmount,
1922 int &MaskWidth) {
1923 EVT VT = Op.getValueType();
1924 unsigned BitWidth = VT.getSizeInBits();
1925 (void)BitWidth;
1926 assert(BitWidth == 32 || BitWidth == 64);
1927
1928 APInt KnownZero, KnownOne;
1929 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1930
1931 // Non-zero in the sense that they're not provably zero, which is the key
1932 // point if we want to use this value
1933 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1934
1935 // Discard a constant AND mask if present. It's safe because the node will
1936 // already have been factored into the computeKnownBits calculation above.
1937 uint64_t AndImm;
1938 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1939 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1940 Op = Op.getOperand(0);
1941 }
1942
1943 uint64_t ShlImm;
1944 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1945 return false;
1946 Op = Op.getOperand(0);
1947
1948 if (!isShiftedMask_64(NonZeroBits))
1949 return false;
1950
1951 ShiftAmount = countTrailingZeros(NonZeroBits);
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00001952 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
Tim Northover3b0846e2014-05-24 12:50:23 +00001953
1954 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1955 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1956 // amount.
1957 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1958
1959 return true;
1960}
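// Worked example (illustrative): for (and (shl x, 4), 0xff0), assuming
// nothing is known about the bits of x, NonZeroBits = 0xff0, so
// ShiftAmount = 4 and MaskWidth = 8; since the SHL amount already equals
// ShiftAmount, Src is x itself and no extra shift is inserted.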
1961
1962 // Given an OR operation, check if we have the following pattern:
1963 // ubfm c, b, imm, imm2 (or something that does the same job, see
1964 // isBitfieldExtractOp)
1965 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1966 // countTrailingZeros(mask2) == imm2 - imm + 1
1967 // f = d | c
1968 // If yes, the given reference arguments will be updated so that one can
1969 // replace the OR instruction with:
1970 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
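// For example (an illustrative i32 case, not from the original source):
//   c = ubfm b, 16, 23      ; extract bits [23:16] of b
//   d = e & 0xffffff00      ; mask2: countTrailingZeros = 8 == 23 - 16 + 1
//   f = d | c
// matches with Opc = BFMWri, Dst = e, Src = b, ImmR = 16 and ImmS = 23, so
// the OR selects to f = BFMWri e, b, 16, 23 (a BFXIL).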
1971static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1972 SDValue &Src, unsigned &ImmR,
1973 unsigned &ImmS, SelectionDAG *CurDAG) {
1974 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
1975
1976 // Set Opc
1977 EVT VT = N->getValueType(0);
1978 if (VT == MVT::i32)
1979 Opc = AArch64::BFMWri;
1980 else if (VT == MVT::i64)
1981 Opc = AArch64::BFMXri;
1982 else
1983 return false;
1984
1985 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1986 // have the expected shape. Try to undo that.
1987 APInt UsefulBits;
1988 getUsefulBits(SDValue(N, 0), UsefulBits);
1989
1990 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1991 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1992
1993 // OR is commutative; check both possibilities (does LLVM provide a
1994 // way to do that directly, e.g., via a code matcher?)
1995 SDValue OrOpd1Val = N->getOperand(1);
1996 SDNode *OrOpd0 = N->getOperand(0).getNode();
1997 SDNode *OrOpd1 = N->getOperand(1).getNode();
1998 for (int i = 0; i < 2;
1999 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
2000 unsigned BFXOpc;
2001 int DstLSB, Width;
2002 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2003 NumberOfIgnoredLowBits, true)) {
2004 // Check that the returned opcode is compatible with the pattern,
2005 // i.e., same type and zero extended (U and not S)
2006 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2007 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2008 continue;
2009
2010 // Compute the width of the bitfield insertion
2011 DstLSB = 0;
2012 Width = ImmS - ImmR + 1;
2013 // FIXME: This constraint is to catch bitfield insertion; we may
2014 // want to widen the pattern if we want to grab the general bitfield
2015 // move case.
2016 if (Width <= 0)
2017 continue;
2018
2019 // If the mask on the insertee is correct, we have a BFXIL operation. We
2020 // can share the ImmR and ImmS values from the already-computed UBFM.
2021 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
2022 DstLSB, Width)) {
2023 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2024 ImmS = Width - 1;
2025 } else
2026 continue;
2027
2028 // Check the second part of the pattern
2029 EVT VT = OrOpd1->getValueType(0);
2030 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2031
2032 // Compute the known zero bits for the candidate first operand.
2033 // This allows us to catch a more general case than just looking for
2034 // an AND with imm. Indeed, simplify-demanded-bits may have removed
2035 // the AND instruction because it proved it was useless.
2036 APInt KnownZero, KnownOne;
2037 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
2038
2039 // Check if there is enough room for the second operand to appear
2040 // in the first one
2041 APInt BitsToBeInserted =
2042 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
2043
2044 if ((BitsToBeInserted & ~KnownZero) != 0)
2045 continue;
2046
2047 // Set the first operand
2048 uint64_t Imm;
2049 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2050 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2051 // In that case, we can eliminate the AND
2052 Dst = OrOpd1->getOperand(0);
2053 else
2054 // Maybe the AND has been removed by simplify-demanded-bits
2055 // or is useful because it discards more bits
2056 Dst = OrOpd1Val;
2057
2058 // both parts match
2059 return true;
2060 }
2061
2062 return false;
2063}
2064
2065SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
2066 if (N->getOpcode() != ISD::OR)
2067 return nullptr;
2068
2069 unsigned Opc;
2070 unsigned LSB, MSB;
2071 SDValue Opd0, Opd1;
2072
2073 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
2074 return nullptr;
2075
2076 EVT VT = N->getValueType(0);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002077 SDLoc dl(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00002078 SDValue Ops[] = { Opd0,
2079 Opd1,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002080 CurDAG->getTargetConstant(LSB, dl, VT),
2081 CurDAG->getTargetConstant(MSB, dl, VT) };
Tim Northover3b0846e2014-05-24 12:50:23 +00002082 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2083}
2084
Geoff Berryc573bf7a2015-07-28 15:24:10 +00002085/// GenerateInexactFlagIfNeeded - Insert FRINTX instruction to generate inexact
2086/// signal on round-to-integer operations if needed. C11 leaves it
2087/// implementation-defined whether these operations trigger an inexact
2088/// exception. IEEE says they don't. Unfortunately, Darwin decided they do so
2089/// we sometimes have to insert a special instruction just to set the right bit
2090/// in FPSR.
2091SDNode *AArch64DAGToDAGISel::GenerateInexactFlagIfNeeded(const SDValue &In,
2092 unsigned InTyVariant,
2093 SDLoc DL) {
2094 if (Subtarget->isTargetDarwin() && !TM.Options.UnsafeFPMath) {
2095 // Pick the right FRINTX using InTyVariant needed to set the flags.
2096 // InTyVariant is 0 for 32-bit and 1 for 64-bit.
2097 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2098 return CurDAG->getMachineNode(FRINTXOpcs[InTyVariant], DL,
2099 In.getValueType(), MVT::Glue, In);
2100 }
2101 return nullptr;
2102}
2103
Tim Northover3b0846e2014-05-24 12:50:23 +00002104SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
2105 EVT VT = N->getValueType(0);
2106 unsigned Variant;
2107 unsigned Opc;
Tim Northover3b0846e2014-05-24 12:50:23 +00002108
2109 if (VT == MVT::f32) {
2110 Variant = 0;
2111 } else if (VT == MVT::f64) {
2112 Variant = 1;
2113 } else
2114 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2115
Tim Northover3b0846e2014-05-24 12:50:23 +00002116 switch (N->getOpcode()) {
2117 default:
2118 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2119 case ISD::FCEIL: {
2120 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2121 Opc = FRINTPOpcs[Variant];
2122 break;
2123 }
2124 case ISD::FFLOOR: {
2125 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2126 Opc = FRINTMOpcs[Variant];
2127 break;
2128 }
2129 case ISD::FTRUNC: {
2130 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2131 Opc = FRINTZOpcs[Variant];
2132 break;
2133 }
2134 case ISD::FROUND: {
2135 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2136 Opc = FRINTAOpcs[Variant];
2137 break;
2138 }
2139 }
2140
2141 SDLoc dl(N);
2142 SDValue In = N->getOperand(0);
2143 SmallVector<SDValue, 2> Ops;
2144 Ops.push_back(In);
2145
Geoff Berryc573bf7a2015-07-28 15:24:10 +00002146 if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, Variant, dl))
2147 Ops.push_back(SDValue(FRINTXNode, 1));
Tim Northover3b0846e2014-05-24 12:50:23 +00002148
2149 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2150}
2151
Geoff Berryc573bf7a2015-07-28 15:24:10 +00002152/// SelectFPConvertWithRound - Try to combine FP rounding and
2153/// FP-INT conversion.
2154SDNode *AArch64DAGToDAGISel::SelectFPConvertWithRound(SDNode *N) {
2155 SDNode *Op0 = N->getOperand(0).getNode();
2156
2157 // Return if the round op is used by other nodes, as this would result in two
2158 // FRINTX, one each for round and convert.
2159 if (!Op0->hasOneUse())
2160 return nullptr;
2161
2162 unsigned InTyVariant;
2163 EVT InTy = Op0->getValueType(0);
2164 if (InTy == MVT::f32)
2165 InTyVariant = 0;
2166 else if (InTy == MVT::f64)
2167 InTyVariant = 1;
2168 else
2169 return nullptr;
2170
2171 unsigned OutTyVariant;
2172 EVT OutTy = N->getValueType(0);
2173 if (OutTy == MVT::i32)
2174 OutTyVariant = 0;
2175 else if (OutTy == MVT::i64)
2176 OutTyVariant = 1;
2177 else
2178 return nullptr;
2179
2180 assert((N->getOpcode() == ISD::FP_TO_SINT
2181 || N->getOpcode() == ISD::FP_TO_UINT) && "Unexpected opcode!");
2182 unsigned FpConVariant = N->getOpcode() == ISD::FP_TO_SINT ? 0 : 1;
2183
2184 unsigned Opc;
2185 switch (Op0->getOpcode()) {
2186 default:
2187 return nullptr;
2188 case ISD::FCEIL: {
2189 unsigned FCVTPOpcs[2][2][2] = {
2190 { { AArch64::FCVTPSUWSr, AArch64::FCVTPSUXSr },
2191 { AArch64::FCVTPSUWDr, AArch64::FCVTPSUXDr } },
2192 { { AArch64::FCVTPUUWSr, AArch64::FCVTPUUXSr },
2193 { AArch64::FCVTPUUWDr, AArch64::FCVTPUUXDr } } };
2194 Opc = FCVTPOpcs[FpConVariant][InTyVariant][OutTyVariant];
2195 break;
2196 }
2197 case ISD::FFLOOR: {
2198 unsigned FCVTMOpcs[2][2][2] = {
2199 { { AArch64::FCVTMSUWSr, AArch64::FCVTMSUXSr },
2200 { AArch64::FCVTMSUWDr, AArch64::FCVTMSUXDr } },
2201 { { AArch64::FCVTMUUWSr, AArch64::FCVTMUUXSr },
2202 { AArch64::FCVTMUUWDr, AArch64::FCVTMUUXDr } } };
2203 Opc = FCVTMOpcs[FpConVariant][InTyVariant][OutTyVariant];
2204 break;
2205 }
2206 case ISD::FTRUNC: {
2207 unsigned FCVTZOpcs[2][2][2] = {
2208 { { AArch64::FCVTZSUWSr, AArch64::FCVTZSUXSr },
2209 { AArch64::FCVTZSUWDr, AArch64::FCVTZSUXDr } },
2210 { { AArch64::FCVTZUUWSr, AArch64::FCVTZUUXSr },
2211 { AArch64::FCVTZUUWDr, AArch64::FCVTZUUXDr } } };
2212 Opc = FCVTZOpcs[FpConVariant][InTyVariant][OutTyVariant];
2213 break;
2214 }
2215 case ISD::FROUND: {
2216 unsigned FCVTAOpcs[2][2][2] = {
2217 { { AArch64::FCVTASUWSr, AArch64::FCVTASUXSr },
2218 { AArch64::FCVTASUWDr, AArch64::FCVTASUXDr } },
2219 { { AArch64::FCVTAUUWSr, AArch64::FCVTAUUXSr },
2220 { AArch64::FCVTAUUWDr, AArch64::FCVTAUUXDr } } };
2221 Opc = FCVTAOpcs[FpConVariant][InTyVariant][OutTyVariant];
2222 break;
2223 }
2224 }
2225
2226 SDLoc DL(N);
2227 SDValue In = Op0->getOperand(0);
2228 SmallVector<SDValue, 2> Ops;
2229 Ops.push_back(In);
2230
2231 if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, InTyVariant, DL))
2232 Ops.push_back(SDValue(FRINTXNode, 1));
2233
2234 return CurDAG->getMachineNode(Opc, DL, OutTy, Ops);
2235}
2236
Tim Northover3b0846e2014-05-24 12:50:23 +00002237bool
2238AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2239 unsigned RegWidth) {
2240 APFloat FVal(0.0);
2241 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2242 FVal = CN->getValueAPF();
2243 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2244 // Some otherwise illegal constants are allowed in this case.
2245 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2246 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2247 return false;
2248
2249 ConstantPoolSDNode *CN =
2250 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2251 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2252 } else
2253 return false;
2254
2255 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2256 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2257 // x-register.
2258 //
2259 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2260 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2261 // integers.
2262 bool IsExact;
2263
2264 // fbits is between 1 and 64 in the worst-case, which means the fmul
2265 // could have 2^64 as an actual operand. Need 65 bits of precision.
2266 APSInt IntVal(65, true);
2267 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2268
2269 // N.b. isPowerOf2 also checks for > 0.
2270 if (!IsExact || !IntVal.isPowerOf2()) return false;
2271 unsigned FBits = IntVal.logBase2();
2272
2273 // Checks above should have guaranteed that we haven't lost information in
2274 // finding FBits, but it must still be in range.
2275 if (FBits == 0 || FBits > RegWidth) return false;
2276
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002277 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
Tim Northover3b0846e2014-05-24 12:50:23 +00002278 return true;
2279}
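// Worked example (illustrative, not from the original source): for
// (fp_to_sint (fmul Val, 65536.0)) with a 32-bit destination register,
// IntVal is exactly 2^16, so FBits = 16 and FixedPos is set to 16, letting
// the multiply fold into a fixed-point conversion.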
2280
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002281// Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
2282 // of the string, obtains the integer values from them, and combines these
2283 // into a single value to be used in the MRS/MSR instruction.
2284static int getIntOperandFromRegisterString(StringRef RegString) {
2285 SmallVector<StringRef, 5> Fields;
Chandler Carruthe4405e92015-09-10 06:12:31 +00002286 RegString.split(Fields, ':');
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002287
2288 if (Fields.size() == 1)
2289 return -1;
2290
2291 assert(Fields.size() == 5
2292 && "Invalid number of fields in read register string");
2293
2294 SmallVector<int, 5> Ops;
2295 bool AllIntFields = true;
2296
2297 for (StringRef Field : Fields) {
2298 unsigned IntField;
2299 AllIntFields &= !Field.getAsInteger(10, IntField);
2300 Ops.push_back(IntField);
2301 }
2302
2303 assert(AllIntFields &&
2304 "Unexpected non-integer value in special register string.");
2305
2306 // Need to combine the integer fields of the string into a single value
2307 // based on the bit encoding of MRS/MSR instruction.
2308 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2309 (Ops[3] << 3) | (Ops[4]);
2310}
2311
2312// Lower the read_register intrinsic to an MRS instruction node if the special
2313 // register string argument is either of the form detailed in the ACLE (the
2314 // form described in getIntOperandFromRegisterString) or is a named register
2315// known by the MRS SysReg mapper.
2316SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
2317 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2318 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2319 SDLoc DL(N);
2320
2321 int Reg = getIntOperandFromRegisterString(RegString->getString());
2322 if (Reg != -1)
2323 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2324 MVT::Other,
2325 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2326 N->getOperand(0));
2327
2328 // Use the sysreg mapper to map the remaining possible strings to the
2329 // value for the register to be used for the instruction operand.
2330 AArch64SysReg::MRSMapper mapper;
2331 bool IsValidSpecialReg;
2332 Reg = mapper.fromString(RegString->getString(),
2333 Subtarget->getFeatureBits(),
2334 IsValidSpecialReg);
2335 if (IsValidSpecialReg)
2336 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2337 MVT::Other,
2338 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2339 N->getOperand(0));
2340
2341 return nullptr;
2342}
2343
2344// Lower the write_register intrinsic to an MSR instruction node if the special
2345 // register string argument is either of the form detailed in the ACLE (the
2346 // form described in getIntOperandFromRegisterString) or is a named register
2347// known by the MSR SysReg mapper.
2348SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
2349 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2350 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2351 SDLoc DL(N);
2352
2353 int Reg = getIntOperandFromRegisterString(RegString->getString());
2354 if (Reg != -1)
2355 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2356 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2357 N->getOperand(2), N->getOperand(0));
2358
2359 // Check if the register was one of those allowed as the pstatefield value in
2360 // the MSR (immediate) instruction. To accept the values allowed in the
2361 // pstatefield for the MSR (immediate) instruction, we also require that an
2362 // immediate value has been provided as an argument; we know that this is
2363 // the case, as it has been ensured by semantic checking.
2364 AArch64PState::PStateMapper PMapper;
2365 bool IsValidSpecialReg;
2366 Reg = PMapper.fromString(RegString->getString(),
2367 Subtarget->getFeatureBits(),
2368 IsValidSpecialReg);
2369 if (IsValidSpecialReg) {
2370 assert (isa<ConstantSDNode>(N->getOperand(2))
2371 && "Expected a constant integer expression.");
2372 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2373 return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other,
2374 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2375 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2376 N->getOperand(0));
2377 }
2378
2379 // Use the sysreg mapper to attempt to map the remaining possible strings
2380 // to the value for the register to be used for the MSR (register)
2381 // instruction operand.
2382 AArch64SysReg::MSRMapper Mapper;
2383 Reg = Mapper.fromString(RegString->getString(),
2384 Subtarget->getFeatureBits(),
2385 IsValidSpecialReg);
2386
2387 if (IsValidSpecialReg)
2388 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2389 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2390 N->getOperand(2), N->getOperand(0));
2391
2392 return nullptr;
2393}
2394
Tim Northover3b0846e2014-05-24 12:50:23 +00002395SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2396 // Dump information about the Node being selected
2397 DEBUG(errs() << "Selecting: ");
2398 DEBUG(Node->dump(CurDAG));
2399 DEBUG(errs() << "\n");
2400
2401 // If we have a custom node, we already have selected!
2402 if (Node->isMachineOpcode()) {
2403 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2404 Node->setNodeId(-1);
2405 return nullptr;
2406 }
2407
2408 // A few custom selection cases.
2409 SDNode *ResNode = nullptr;
2410 EVT VT = Node->getValueType(0);
2411
2412 switch (Node->getOpcode()) {
2413 default:
2414 break;
2415
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002416 case ISD::READ_REGISTER:
2417 if (SDNode *Res = SelectReadRegister(Node))
2418 return Res;
2419 break;
2420
2421 case ISD::WRITE_REGISTER:
2422 if (SDNode *Res = SelectWriteRegister(Node))
2423 return Res;
2424 break;
2425
Tim Northover3b0846e2014-05-24 12:50:23 +00002426 case ISD::ADD:
2427 if (SDNode *I = SelectMLAV64LaneV128(Node))
2428 return I;
2429 break;
2430
2431 case ISD::LOAD: {
2432 // Try to select as an indexed load. Fall through to normal processing
2433 // if we can't.
2434 bool Done = false;
2435 SDNode *I = SelectIndexedLoad(Node, Done);
2436 if (Done)
2437 return I;
2438 break;
2439 }
2440
2441 case ISD::SRL:
2442 case ISD::AND:
2443 case ISD::SRA:
2444 if (SDNode *I = SelectBitfieldExtractOp(Node))
2445 return I;
2446 break;
2447
2448 case ISD::OR:
2449 if (SDNode *I = SelectBitfieldInsertOp(Node))
2450 return I;
2451 break;
2452
2453 case ISD::EXTRACT_VECTOR_ELT: {
2454 // Extracting lane zero is a special case where we can just use a plain
2455 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2456 // the rest of the compiler, especially the register allocator and copy
2457 // propagation, to reason about, so it is preferred when it's possible to
2458 // use it.
2459 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2460 // Bail and use the default Select() for non-zero lanes.
2461 if (LaneNode->getZExtValue() != 0)
2462 break;
2463 // If the element type is not the same as the result type, likewise
2464 // bail and use the default Select(), as there's more to do than just
2465 // a cross-class COPY. This catches extracts of i8 and i16 elements
2466 // since they will need an explicit zext.
2467 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2468 break;
2469 unsigned SubReg;
2470 switch (Node->getOperand(0)
2471 .getValueType()
2472 .getVectorElementType()
2473 .getSizeInBits()) {
2474 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002475 llvm_unreachable("Unexpected vector element type!");
Tim Northover3b0846e2014-05-24 12:50:23 +00002476 case 64:
2477 SubReg = AArch64::dsub;
2478 break;
2479 case 32:
2480 SubReg = AArch64::ssub;
2481 break;
Oliver Stannard89d15422014-08-27 16:16:04 +00002482 case 16:
2483 SubReg = AArch64::hsub;
2484 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00002485 case 8:
2486 llvm_unreachable("unexpected zext-requiring extract element!");
2487 }
2488 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2489 Node->getOperand(0));
2490 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2491 DEBUG(Extract->dumpr(CurDAG));
2492 DEBUG(dbgs() << "\n");
2493 return Extract.getNode();
2494 }
2495 case ISD::Constant: {
2496 // Materialize zero constants as copies from WZR/XZR. This allows
2497 // the coalescer to propagate these into other instructions.
2498 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2499 if (ConstNode->isNullValue()) {
2500 if (VT == MVT::i32)
2501 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2502 AArch64::WZR, MVT::i32).getNode();
2503 else if (VT == MVT::i64)
2504 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2505 AArch64::XZR, MVT::i64).getNode();
2506 }
2507 break;
2508 }
2509
2510 case ISD::FrameIndex: {
2511 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2512 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2513 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2514 const TargetLowering *TLI = getTargetLowering();
Mehdi Amini44ede332015-07-09 02:09:04 +00002515 SDValue TFI = CurDAG->getTargetFrameIndex(
2516 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002517 SDLoc DL(Node);
2518 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2519 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
Tim Northover3b0846e2014-05-24 12:50:23 +00002520 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2521 }
2522 case ISD::INTRINSIC_W_CHAIN: {
2523 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2524 switch (IntNo) {
2525 default:
2526 break;
2527 case Intrinsic::aarch64_ldaxp:
2528 case Intrinsic::aarch64_ldxp: {
2529 unsigned Op =
2530 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2531 SDValue MemAddr = Node->getOperand(2);
2532 SDLoc DL(Node);
2533 SDValue Chain = Node->getOperand(0);
2534
2535 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2536 MVT::Other, MemAddr, Chain);
2537
2538 // Transfer memoperands.
2539 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2540 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2541 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2542 return Ld;
2543 }
2544 case Intrinsic::aarch64_stlxp:
2545 case Intrinsic::aarch64_stxp: {
2546 unsigned Op =
2547 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2548 SDLoc DL(Node);
2549 SDValue Chain = Node->getOperand(0);
2550 SDValue ValLo = Node->getOperand(2);
2551 SDValue ValHi = Node->getOperand(3);
2552 SDValue MemAddr = Node->getOperand(4);
2553
2554 // Place arguments in the right order.
Benjamin Kramerea68a942015-02-19 15:26:17 +00002555 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00002556
2557 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2558 // Transfer memoperands.
2559 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2560 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2561 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2562
2563 return St;
2564 }
2565 case Intrinsic::aarch64_neon_ld1x2:
2566 if (VT == MVT::v8i8)
2567 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2568 else if (VT == MVT::v16i8)
2569 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002570 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002571 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002572 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002573 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2574 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2575 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2576 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2577 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2578 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2579 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2580 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2581 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2582 break;
2583 case Intrinsic::aarch64_neon_ld1x3:
2584 if (VT == MVT::v8i8)
2585 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2586 else if (VT == MVT::v16i8)
2587 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002588 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002589 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002590 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002591 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2592 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2593 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2594 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2595 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2596 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2597 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2598 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2599 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2600 break;
2601 case Intrinsic::aarch64_neon_ld1x4:
2602 if (VT == MVT::v8i8)
2603 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2604 else if (VT == MVT::v16i8)
2605 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002606 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002607 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
Oliver Stannard89d15422014-08-27 16:16:04 +00002608 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
Tim Northover3b0846e2014-05-24 12:50:23 +00002609 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2610 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2611 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2612 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2613 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2614 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2615 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2616 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2617 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2618 break;
    case Intrinsic::aarch64_neon_ld2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
      break;
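    // ld2r/ld3r/ld4r load one element per result vector and replicate it to
    // every lane (the LD2R/LD3R/LD4R "load and replicate" forms).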
    case Intrinsic::aarch64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
      break;
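    // The lane loads insert into a lane of existing vectors, so only the
    // element size matters: 64-bit and 128-bit vector types with the same
    // element width share one instruction.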
    case Intrinsic::aarch64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, AArch64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 2, AArch64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, AArch64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, AArch64::LD2i64);
      break;
    case Intrinsic::aarch64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, AArch64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 3, AArch64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, AArch64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, AArch64::LD3i64);
      break;
    case Intrinsic::aarch64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, AArch64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 4, AArch64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, AArch64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, AArch64::LD4i64);
      break;
    }
    break;
  }
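  // Intrinsics with no chain: the table lookups (TBL/TBX) and the widening
  // multiplies.  The intrinsic ID is operand 0 here because there is no
  // chain operand in front of it.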
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
                                                  : AArch64::TBLv16i8Two,
                         false);
    case Intrinsic::aarch64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                                  : AArch64::TBLv16i8Three,
                         false);
    case Intrinsic::aarch64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                                  : AArch64::TBLv16i8Four,
                         false);
    case Intrinsic::aarch64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
                                                  : AArch64::TBXv16i8Two,
                         true);
    case Intrinsic::aarch64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                                  : AArch64::TBXv16i8Three,
                         true);
    case Intrinsic::aarch64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                                  : AArch64::TBXv16i8Four,
                         true);
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
    break;
  }
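  // Store intrinsics mirror the loads above.  Stores only produce a chain,
  // so the vector type is taken from the first stored value (operand 2,
  // after the chain and the intrinsic ID).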
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, AArch64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, AArch64::ST1Twov16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 2, AArch64::ST1Twov4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 2, AArch64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, AArch64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, AArch64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, AArch64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, AArch64::ST1Twov1d);
      break;
    }
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, AArch64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, AArch64::ST1Threev16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 3, AArch64::ST1Threev4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 3, AArch64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, AArch64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, AArch64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, AArch64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, AArch64::ST1Threev1d);
      break;
    }
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, AArch64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, AArch64::ST1Fourv16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 4, AArch64::ST1Fourv4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 4, AArch64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, AArch64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, AArch64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
      break;
    }
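    // Unlike the st1xN forms above, st2/st3/st4 interleave the elements of
    // their operands in memory.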
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, AArch64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, AArch64::ST2Twov16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 2, AArch64::ST2Twov4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 2, AArch64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, AArch64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, AArch64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, AArch64::ST2Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, AArch64::ST1Twov1d);
      break;
    }
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, AArch64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, AArch64::ST3Threev16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 3, AArch64::ST3Threev4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 3, AArch64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, AArch64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, AArch64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, AArch64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, AArch64::ST1Threev1d);
      break;
    }
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, AArch64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, AArch64::ST4Fourv16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 4, AArch64::ST4Fourv4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 4, AArch64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, AArch64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, AArch64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, AArch64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
      break;
    }
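    // As with the lane loads, the lane stores are keyed on element size
    // alone.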
    case Intrinsic::aarch64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, AArch64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 2, AArch64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, AArch64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, AArch64::ST2i64);
      break;
    }
    case Intrinsic::aarch64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, AArch64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 3, AArch64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, AArch64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, AArch64::ST3i64);
      break;
    }
    case Intrinsic::aarch64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, AArch64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 4, AArch64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, AArch64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, AArch64::ST4i64);
      break;
    }
    }
    break;
  }
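  // Post-incremented loads: these AArch64ISD nodes are formed when a
  // structured load is combined with the add that updates its base register,
  // and they select the write-back (_POST) instruction forms, e.g.
  // LD2Twov8b_POST yields "ld2 { v0.8b, v1.8b }, [x0], #16".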
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
    break;
  }
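  // Post-incremented load-and-replicate forms (LD1R..LD4R with write-back).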
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
    break;
  }
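  // Post-incremented single-lane loads; as before, only the element size
  // picks the instruction.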
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
    break;
  }
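  // Post-incremented stores.  A store node has no value result, so VT is
  // re-read from the stored vector (operand 1) in each case.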
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
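  // Post-incremented single-lane stores.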
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }

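  // Libm-style rounding nodes: SelectLIBM maps these to the matching FRINT
  // instruction (frintp/frintm/frintz/frinta for ceil/floor/trunc/round)
  // when it recognizes the node, otherwise the default patterns apply.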
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;

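  // Conversions to integer: SelectFPConvertWithRound tries to fold an
  // explicit rounding step into a single rounding-mode FCVT conversion
  // (e.g. fp_to_sint(fceil(x)) into fcvtps).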
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (SDNode *I = SelectFPConvertWithRound(Node))
      return I;
    break;
  }

  // Select the default instruction.
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}