//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
        ForCodeSize(false) {}

  StringRef getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction()->optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that is
  /// returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  void SelectCMP_SWAP(SDNode *N);

};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
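/// For example (illustrative, not from the original source): #0xabc matches
/// with Val = 0xabc and Shift = "LSL #0"; #0xabc000 matches with Val = 0xabc
/// and Shift = "LSL #12"; #0xabc0 fits neither form and is rejected.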
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
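  // Illustrative example (not from the original source): for an i32 immediate
  // of -4096 (0xFFFFF000), the negated value is 0x1000, which SelectArithImmed
  // accepts below as #1, LSL #12, letting the add/sub be selected with the
  // opposite operation and a positive immediate.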
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding a logical shift of up to three places.
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->uses())
    if (!isa<MemSDNode>(*UI))
      for (SDNode *UII : UI->uses())
        if (!isa<MemSDNode>(*UII))
          return false;
  return true;
}

/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (ForCodeSize || V.hasOneUse())
    return true;
  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the addressing mode and save a cycle.
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
      isWorthFoldingSHL(V))
    return true;
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to the default of "LSL 0". The
/// logical instructions allow the shifted register to be rotated, but the
/// arithmetic instructions do not. The AllowROR parameter specifies whether
/// ROR is supported.
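/// For example (illustrative, not from the original source): for
/// N = (shl x1, #4) with no other uses, this returns Reg = x1 and
/// Shift = "LSL #4", so a surrounding node can be selected as e.g.
/// "add x0, x2, x1, lsl #4" without a separate shift instruction.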
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
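/// Illustrative summary (not from the original source): sign-extends from
/// i8/i16/i32 map to SXTB/SXTH/SXTW and zero/any-extends to UXTB/UXTH/UXTW,
/// with (and x, 0xFF)/(and x, 0xFFFF)/(and x, 0xFFFFFFFF) treated as
/// UXTB/UXTH/UXTW as well. For load/store addressing only the 32-bit forms
/// are legal, so the byte and halfword cases are rejected there.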
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
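/// Illustrative example (not from the original source): when the lane operand
/// was formed by extracting the high half of a 128-bit vector, the combined
/// lane index computed by checkHighLaneIndex lets the indexed MLA reference
/// the original 128-bit register directly, e.g. "mla v0.4s, v1.4s, v2.s[3]".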
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}

bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}

/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
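/// For example (illustrative, not from the original source): for
/// N = (shl (sext_inreg x1, i8), #2) this returns Ext = SXTB with
/// ShiftVal = 2, so the parent node can be selected as e.g.
/// "add x0, x2, w1, sxtb #2".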
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if a free 32-bit -> 64-bit zext can be used instead.
    if (Ext == AArch64_AM::UXTW &&
        Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point
/// in folding it into the mem op. Theoretically, it shouldn't matter, but
/// there's a single pseudo-instruction for an ADRP/ADD pair so over-aggressive
/// folding leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
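/// Illustrative range (not from the original source): the 7-bit signed offset
/// is scaled by Size, so for Size = 8 (e.g. a 64-bit LDP/STP) the reachable
/// byte offsets are the multiples of 8 in [-512, 504].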
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // offset selected here doesn't support labels/immediates, only base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
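/// For example (illustrative, not from the original source): with Size = 8,
/// an address of the form (add x1, #32) selects Base = x1 and OffImm = 4
/// (32 >> 3), so "ldr x0, [x1, #32]" encodes the scaled immediate directly.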
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getValueType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
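/// For example (illustrative, not from the original source): with Size = 8,
/// an offset of -8 is rejected by the scaled form above (which requires a
/// non-negative offset) but fits the signed 9-bit range [-256, 255], so it
/// selects into e.g. "ldur x0, [x1, #-8]".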
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or if it can be encoded in an "ADD LSL #12" and cannot
// be encoded by a single MOVZ, return true.
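// Illustrative examples (not from the original source): 0x123000 fits
// "ADD ..., LSL #12" and cannot be built with a single MOVZ, so it is
// preferred; 0x30000 also fits "ADD ..., LSL #12" but is a single
// "MOVZ #0x3, LSL #16", so it is not.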
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable into an
  // ADD/SUB. Instead it will use the [BaseReg + 0] address mode and generate
  // instructions like:
  //   MOV X0, WideImmediate
  //   ADD X1, BaseReg, X0
  //   LDR X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode can save
  // one ADD/SUB:
  //   MOV X0, WideImmediate
  //   LDR X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip the immediate if it can be selected by the load/store addressing
    // mode. Also skip it if it can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}

bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
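  // Illustrative distinction (not from the original source): a pre-indexed
  // load such as "ldr x0, [x1, #8]!" accesses base+offset and writes that
  // address back to the base register, while a post-indexed load such as
  // "ldr x0, [x1], #8" accesses the old base and then writes back base+offset.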
1094
1095 // We're not doing validity checking here. That was done when checking
1096 // if we should mark the load as indexed or not. We're just selecting
1097 // the right instruction.
1098 unsigned Opcode = 0;
1099
1100 ISD::LoadExtType ExtType = LD->getExtensionType();
1101 bool InsertTo64 = false;
1102 if (VT == MVT::i64)
1103 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1104 else if (VT == MVT::i32) {
1105 if (ExtType == ISD::NON_EXTLOAD)
1106 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1107 else if (ExtType == ISD::SEXTLOAD)
1108 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1109 else {
1110 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1111 InsertTo64 = true;
1112 // The result of the load is only i32. It's the subreg_to_reg that makes
1113 // it into an i64.
1114 DstVT = MVT::i32;
1115 }
1116 } else if (VT == MVT::i16) {
1117 if (ExtType == ISD::SEXTLOAD) {
1118 if (DstVT == MVT::i64)
1119 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1120 else
1121 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1122 } else {
1123 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1124 InsertTo64 = DstVT == MVT::i64;
1125 // The result of the load is only i32. It's the subreg_to_reg that makes
1126 // it into an i64.
1127 DstVT = MVT::i32;
1128 }
1129 } else if (VT == MVT::i8) {
1130 if (ExtType == ISD::SEXTLOAD) {
1131 if (DstVT == MVT::i64)
1132 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1133 else
1134 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1135 } else {
1136 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1137 InsertTo64 = DstVT == MVT::i64;
1138 // The result of the load is only i32. It's the subreg_to_reg that makes
1139 // it into an i64.
1140 DstVT = MVT::i32;
1141 }
Ahmed Bougachae0e12db2015-08-04 01:29:38 +00001142 } else if (VT == MVT::f16) {
1143 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
Tim Northover3b0846e2014-05-24 12:50:23 +00001144 } else if (VT == MVT::f32) {
1145 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1146 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1147 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1148 } else if (VT.is128BitVector()) {
1149 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1150 } else
Justin Bogner283e3bd2016-05-12 23:10:30 +00001151 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00001152 SDValue Chain = LD->getChain();
1153 SDValue Base = LD->getBasePtr();
1154 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1155 int OffsetVal = (int)OffsetOp->getZExtValue();
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001156 SDLoc dl(N);
1157 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
Tim Northover3b0846e2014-05-24 12:50:23 +00001158 SDValue Ops[] = { Base, Offset, Chain };
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001159 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
Tim Northover3b0846e2014-05-24 12:50:23 +00001160 MVT::Other, Ops);
1161 // Either way, we're replacing the node, so tell the caller that.
Tim Northover3b0846e2014-05-24 12:50:23 +00001162 SDValue LoadedVal = SDValue(Res, 1);
1163 if (InsertTo64) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001164 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
Tim Northover3b0846e2014-05-24 12:50:23 +00001165 LoadedVal =
1166 SDValue(CurDAG->getMachineNode(
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001167 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1168 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1169 SubReg),
Tim Northover3b0846e2014-05-24 12:50:23 +00001170 0);
1171 }
1172
1173 ReplaceUses(SDValue(N, 0), LoadedVal);
1174 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1175 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
Justin Bogner3525da72016-05-12 20:54:27 +00001176 CurDAG->RemoveDeadNode(N);
Justin Bogner283e3bd2016-05-12 23:10:30 +00001177 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001178}
1179
Justin Bogner283e3bd2016-05-12 23:10:30 +00001180void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1181 unsigned SubRegIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001182 SDLoc dl(N);
1183 EVT VT = N->getValueType(0);
1184 SDValue Chain = N->getOperand(0);
1185
Benjamin Kramerea68a942015-02-19 15:26:17 +00001186 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1187 Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00001188
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001189 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001190
1191 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1192 SDValue SuperReg = SDValue(Ld, 0);
1193 for (unsigned i = 0; i < NumVecs; ++i)
1194 ReplaceUses(SDValue(N, i),
1195 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1196
1197 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
Sanjin Sijaric6f020d92016-11-07 22:39:02 +00001198
1199 // Transfer memoperands.
1200 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1201 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1202 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
1203
Justin Bogner3525da72016-05-12 20:54:27 +00001204 CurDAG->RemoveDeadNode(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00001205}
1206
Justin Bogner283e3bd2016-05-12 23:10:30 +00001207void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1208 unsigned Opc, unsigned SubRegIdx) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001209 SDLoc dl(N);
1210 EVT VT = N->getValueType(0);
1211 SDValue Chain = N->getOperand(0);
1212
Benjamin Kramerea68a942015-02-19 15:26:17 +00001213 SDValue Ops[] = {N->getOperand(1), // Mem operand
1214 N->getOperand(2), // Incremental
1215 Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00001216
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001217 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1218 MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001219
1220 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1221
1222 // Update uses of write back register
1223 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1224
1225 // Update uses of vector list
1226 SDValue SuperReg = SDValue(Ld, 1);
1227 if (NumVecs == 1)
1228 ReplaceUses(SDValue(N, 0), SuperReg);
1229 else
1230 for (unsigned i = 0; i < NumVecs; ++i)
1231 ReplaceUses(SDValue(N, i),
1232 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1233
1234 // Update the chain
1235 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
Justin Bogner3525da72016-05-12 20:54:27 +00001236 CurDAG->RemoveDeadNode(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00001237}
1238
void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  ReplaceNode(N, St);
}

namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(), WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                    AArch64::qsub2, AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(), WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64),  // Lane Number
                   N->getOperand(NumVecs + 2),           // Base register
                   N->getOperand(NumVecs + 3),           // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                      AArch64::qsub2, AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(), WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(), WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be an AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching a bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t AndImm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  AndImm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (AndImm & (AndImm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t SrlImm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   SrlImm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for the bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = SrlImm;
  MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
                                 : countTrailingOnes<uint64_t>(AndImm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead
    // of the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}

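// Worked example (expository comment, not part of the upstream file): for
//   (i32 (and (srl x, 4), 0xff))
// the code above computes AndImm = 0xff and SrlImm = 4, so LSB = 4 and
// MSB = 4 + 8 - 1 = 11; the caller then selects UBFMWri x, 4, 11, which
// disassembles as the alias "ubfx w0, w1, #4, #8".
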
static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
                                             SDValue &Opd0, unsigned &Immr,
                                             unsigned &Imms) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT VT = N->getValueType(0);
  unsigned BitWidth = VT.getSizeInBits();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  SDValue Op = N->getOperand(0);
  if (Op->getOpcode() == ISD::TRUNCATE) {
    Op = Op->getOperand(0);
    VT = Op->getValueType(0);
    BitWidth = VT.getSizeInBits();
  }

  uint64_t ShiftImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
      !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
  if (ShiftImm + Width > BitWidth)
    return false;

  Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
  Opd0 = Op.getOperand(0);
  Immr = ShiftImm;
  Imms = ShiftImm + Width - 1;
  return true;
}

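// Worked example (expository comment, not part of the upstream file): for
//   (i32 (sign_extend_inreg (srl x, 3), i8))
// ShiftImm = 3 and Width = 8, giving Immr = 3 and Imms = 3 + 8 - 1 = 10,
// i.e. SBFMWri x, 3, 10 -- the "sbfx w0, w1, #3, #8" alias.
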
static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern, which basically extracts several
  // contiguous bits from the source value and places them at the LSB of the
  // destination value; all other bits of the destination value are set to
  // zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // where the bit width is inferred from MaskImm >> ShiftImm.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, BitWide + SrlImm - 1
  //

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t AndMask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  // Check whether we really have several bits extract here.
  unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
  if (BitWide && isMask_64(AndMask >> SrlImm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = SrlImm;
    MSB = BitWide + SrlImm - 1;
    return true;
  }

  return false;
}

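// Worked example (expository comment, not part of the upstream file): for
//   (i64 (srl (and x, 0xff0), 4))
// AndMask >> SrlImm = 0xff0 >> 4 = 0xff, a low-bits mask, so BitWide = 8,
// LSB = 4 and MSB = 8 + 4 - 1 = 11: a single "ubfx x0, x1, #4, #8" replaces
// the AND+SRL pair.
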
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing several bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
    return true;

  // We're looking for a shift of a shift.
  uint64_t ShlImm = 0;
  uint64_t TruncBits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting high 32 bits as zero. Our strategy here is to
    // always generate 64bit UBFM. This consistency will help the CSE pass
    // later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Missing combines/constant folding may have left us with strange
  // constants.
  if (ShlImm >= VT.getSizeInBits()) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  int immr = SrlImm - ShlImm;
  Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
  Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}

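// Worked example (expository comment, not part of the upstream file): for
//   (i32 (srl (shl x, 24), 28))
// ShlImm = 24 and SrlImm = 28, so Immr = 28 - 24 = 4 and
// Imms = 32 - 24 - 0 - 1 = 7: UBFMWri x, 4, 7, i.e. "ubfx w0, w1, #4, #4",
// which extracts bits [7:4] of x exactly as the shift pair did.
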
bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND);

  EVT VT = N->getValueType(0);
  EVT NarrowVT = N->getOperand(0)->getValueType(0);
  if (VT != MVT::i64 || NarrowVT != MVT::i32)
    return false;

  uint64_t ShiftImm;
  SDValue Op = N->getOperand(0);
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  SDLoc dl(N);
  // Extend the incoming operand of the shift to 64-bits.
  SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
  unsigned Immr = ShiftImm;
  unsigned Imms = NarrowVT.getSizeInBits() - 1;
  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
  return true;
}

static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);

  case ISD::SIGN_EXTEND_INREG:
    return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}

bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
  unsigned Opc, Immr, Imms;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
    return false;

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // If the bit extract operation is 64bit but the original type is 32bit, we
  // need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
                       CurDAG->getTargetConstant(Imms, dl, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                          MVT::i32, SDValue(BFM, 0), SubReg));
    return true;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}

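// Worked example (expository comment, not part of the upstream file): with
// i32 operands, a DstMask of 0xffff00ff pairs with BitsToBeInserted =
// 0x0000ff00: ANDing them gives 0 and ORing them gives all ones, so an
// (and dst, 0xffff00ff) feeding the OR can be folded away by the BFI formed
// in tryBitfieldInsertOpFromOr below.
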
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for useful bits of x
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}

static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // inherit the bitwidth value
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument
    OpUsefulBits = OpUsefulBits.shl(Imm);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    // The interesting part will be shifted in the result
    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument
    OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}

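// Worked example (expository comment, not part of the upstream file): for a
// UBFM with Imm (immr) = 4 and MSB (imms) = 11 -- the "ubfx #4, #8" form --
// the MSB >= Imm branch first builds the low-8-bit mask 0xff, asks which of
// those result bits are useful, and then shifts the mask left by 4: only
// bits [11:4] of the UBFM's source operand can influence any later use.
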
static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.shl(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask.lshrInPlace(ShiftAmt);
  } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
    // Shift Right
    // We do not handle AArch64_AM::ASR, because the sign will change the
    // number of useful bits
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask.lshrInPlace(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.shl(ShiftAmt);
  } else
    return;

  UsefulBits &= Mask;
}

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
  ResultUsefulBits.flipAllBits();
  APInt Mask(UsefulBits.getBitWidth(), 0);

  getUsefulBits(Op, ResultUsefulBits, Depth + 1);

  if (MSB >= Imm) {
    // The instruction is a BFXIL.
    uint64_t Width = MSB - Imm + 1;
    uint64_t LSB = Imm;

    OpUsefulBits = OpUsefulBits.shl(Width);
    --OpUsefulBits;

    if (Op.getOperand(1) == Orig) {
      // Copy the low bits from the result to bits starting from LSB.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask = Mask.shl(LSB);
    }

    if (Op.getOperand(0) == Orig)
      // Bits starting from LSB in the input contribute to the result.
      Mask |= (ResultUsefulBits & ~OpUsefulBits);
  } else {
    // The instruction is a BFI.
    uint64_t Width = MSB + 1;
    uint64_t LSB = UsefulBits.getBitWidth() - Imm;

    OpUsefulBits = OpUsefulBits.shl(Width);
    --OpUsefulBits;
    OpUsefulBits = OpUsefulBits.shl(LSB);

    if (Op.getOperand(1) == Orig) {
      // Copy the bits from the result to the zero bits.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask.lshrInPlace(LSB);
    }

    if (Op.getOperand(0) == Orig)
      Mask |= (ResultUsefulBits & ~OpUsefulBits);
  }

  UsefulBits &= Mask;
}

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
  case AArch64::ANDWri:
  case AArch64::ANDXri:
    // We increment Depth only when we call the getUsefulBits
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::UBFMWri:
  case AArch64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::BFMWri:
  case AArch64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);

  case AArch64::STRBBui:
  case AArch64::STURBBi:
    if (UserNode->getOperand(0) != Orig)
      return;
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
    return;

  case AArch64::STRHHui:
  case AArch64::STURHHi:
    if (UserNode->getOperand(0) != Orig)
      return;
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
    return;
  }
}

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits
  if (!Depth) {
    unsigned Bitwidth = Op.getScalarValueSizeInBits();
    // At the beginning, assume every produced bit is useful
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point
  UsefulBits &= UsersUsefulBits;
}

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
  if (ShlAmount == 0)
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;

  SDNode *ShiftNode;
  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
  } else {
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
  }

  return SDValue(ShiftNode, 0);
}

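// Worked example (expository comment, not part of the upstream file):
// getLeftShift(CurDAG, Op, 3) on an i32 value builds UBFMWri Op, 29, 28 (the
// "lsl w0, w1, #3" alias), while getLeftShift(CurDAG, Op, -3) builds
// UBFMWri Op, 3, 31 ("lsr w0, w1, #3").
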
/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)".
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    bool BiggerPattern,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  (void)BitWidth;
  assert(BitWidth == 32 || BitWidth == 64);

  APInt KnownZero, KnownOne;
  CurDAG->computeKnownBits(Op, KnownZero, KnownOne);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value
  uint64_t NonZeroBits = (~KnownZero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
    Op = Op.getOperand(0);
  }

  // Don't match if the SHL has more than one use, since then we'll end up
  // generating SHL+UBFIZ instead of just keeping SHL+AND.
  if (!BiggerPattern && !Op.hasOneUse())
    return false;

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an extra
  // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the
  // ISD::SHL amount. BiggerPattern is true when this pattern is being matched
  // for BFI, BiggerPattern is false when this pattern is being matched for
  // UBFIZ, in which case it is not profitable to insert an extra shift.
  if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
    return false;
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);

  return true;
}

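// Worked example (expository comment, not part of the upstream file): for
// i32 (and (shl x, 4), 0xff0), computeKnownBits proves NonZeroBits = 0xff0,
// a shifted mask, so ShiftAmount = 4 and MaskWidth = 8 with Src = x; the
// callers then decide whether that becomes a UBFIZ or one half of a BFI.
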
static bool isShiftedMask(uint64_t Mask, EVT VT) {
  assert(VT == MVT::i32 || VT == MVT::i64);
  if (VT == MVT::i32)
    return isShiftedMask_32(Mask);
  return isShiftedMask_64(Mask);
}

// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
// inserted only sets known zero bits.
static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  unsigned BitWidth = VT.getSizeInBits();

  uint64_t OrImm;
  if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
    return false;

  // Skip this transformation if the OR immediate can be encoded directly in
  // an ORR. Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is
  // most likely performance neutral.
  if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
    return false;

  uint64_t MaskImm;
  SDValue And = N->getOperand(0);
  // Must be a single use AND with an immediate operand.
  if (!And.hasOneUse() ||
      !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
    return false;

  // Compute the Known Zero for the AND as this allows us to catch more general
  // cases than just looking for AND with imm.
  APInt KnownZero, KnownOne;
  CurDAG->computeKnownBits(And, KnownZero, KnownOne);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NotKnownZero = (~KnownZero).getZExtValue();

  // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
  if (!isShiftedMask(KnownZero.getZExtValue(), VT))
    return false;

  // The bits being inserted must only set those bits that are known to be
  // zero.
  if ((OrImm & NotKnownZero) != 0) {
    // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
    // currently handle this case.
    return false;
  }

  // BFI/BFXIL dst, src, #lsb, #width.
  int LSB = countTrailingOnes(NotKnownZero);
  int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();

  // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
  unsigned ImmR = (BitWidth - LSB) % BitWidth;
  unsigned ImmS = Width - 1;

  // If we're creating a BFI instruction, avoid cases where we need more
  // instructions to materialize the BFI constant as compared to the original
  // ORR. A BFXIL will use the same constant as the original ORR, so the code
  // should be no worse in this case.
  bool IsBFI = LSB != 0;
  uint64_t BFIImm = OrImm >> LSB;
  if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
    // We have a BFI instruction and we know the constant can't be materialized
    // with an ORR-immediate with the zero register.
    unsigned OrChunks = 0, BFIChunks = 0;
    for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
      if (((OrImm >> Shift) & 0xFFFF) != 0)
        ++OrChunks;
      if (((BFIImm >> Shift) & 0xFFFF) != 0)
        ++BFIChunks;
    }
    if (BFIChunks > OrChunks)
      return false;
  }

  // Materialize the constant to be inserted.
  SDLoc DL(N);
  unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
  SDNode *MOVI = CurDAG->getMachineNode(
      MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));

  // Create the BFI/BFXIL instruction.
  SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
                   CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}

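// Worked example (expository comment, not part of the upstream file; assumes
// computeKnownBits learns nothing beyond the AND mask): for
//   i32 'or (and x, 0xffff000f), 0x120'
// KnownZero = 0x0000fff0 and NotKnownZero = 0xffff000f, so LSB = 4 and
// Width = 12. Since 0x120 only sets known-zero bits, the AND+ORR pair becomes
// roughly "mov w8, #0x12; bfi w0, w8, #4, #12" instead of mov+and+orr.
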
static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
                                      SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  unsigned BitWidth = VT.getSizeInBits();

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // Given an OR operation, check if we have the following pattern
  // ubfm c, b, imm, imm2 (or something that does the same job, see
  //                       isBitfieldExtractOp)
  // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
  //                 countTrailingZeros(mask2) == imm2 - imm + 1
  // f = d | c
  // If yes, replace the OR instruction with:
  // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2

  // OR is commutative, check all combinations of operand order and values of
  // BiggerPattern, i.e.
  //     Opd0, Opd1, BiggerPattern=false
  //     Opd1, Opd0, BiggerPattern=false
  //     Opd0, Opd1, BiggerPattern=true
  //     Opd1, Opd0, BiggerPattern=true
  // Several of these combinations may match, so check with BiggerPattern=false
  // first since that will produce better results by matching more instructions
  // and/or inserting fewer extra instructions.
  for (int I = 0; I < 4; ++I) {

    SDValue Dst, Src;
    unsigned ImmR, ImmS;
    bool BiggerPattern = I / 2;
    SDValue OrOpd0Val = N->getOperand(I % 2);
    SDNode *OrOpd0 = OrOpd0Val.getNode();
    SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
    SDNode *OrOpd1 = OrOpd1Val.getNode();

    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, BiggerPattern)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S)
      if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion; we may want
      // to widen the pattern if we want to grab the general bitfield move
      // case.
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
                                       BiggerPattern,
                                       Src, DstLSB, Width)) {
      ImmR = (BitWidth - DstLSB) % BitWidth;
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern
    EVT VT = OrOpd1->getValueType(0);
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the Known Zero for the candidate of the first operand.
    // This allows us to catch more general cases than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proves it was useless.
    APInt KnownZero, KnownOne;
    CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);

    // Check if there is enough room for the second operand to appear
    // in the first one
    APInt BitsToBeInserted =
        APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~KnownZero) != 0)
      continue;

    // Set the first operand
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits
      Dst = OrOpd1Val;

    // both parts match
    SDLoc DL(N);
    SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);
    return true;
  }

  // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
  // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
  // mask (e.g., 0x000ffff0).
  uint64_t Mask0Imm, Mask1Imm;
  SDValue And0 = N->getOperand(0);
  SDValue And1 = N->getOperand(1);
  if (And0.hasOneUse() && And1.hasOneUse() &&
      isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
      isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
      APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
      (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {

    // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
    // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
    // bits to be inserted.
    if (isShiftedMask(Mask0Imm, VT)) {
      std::swap(And0, And1);
      std::swap(Mask0Imm, Mask1Imm);
    }

    SDValue Src = And1->getOperand(0);
    SDValue Dst = And0->getOperand(0);
    unsigned LSB = countTrailingZeros(Mask1Imm);
    int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();

    // The BFXIL inserts the low-order bits from a source register, so right
    // shift the needed bits into place.
    SDLoc DL(N);
    unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
    SDNode *LSR = CurDAG->getMachineNode(
        ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
        CurDAG->getTargetConstant(BitWidth - 1, DL, VT));

    // BFXIL is an alias of BFM, so translate to BFM operands.
    unsigned ImmR = (BitWidth - LSB) % BitWidth;
    unsigned ImmS = Width - 1;

    // Create the BFXIL instruction.
    SDValue Ops[] = {Dst, SDValue(LSR, 0),
                     CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);
    return true;
  }

  return false;
}

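// Worked example (expository comment, not part of the upstream file) of the
// two-AND case above: for
//   i32 'or (and x, 0xfffff00f), (and y, 0x00000ff0)'
// the masks are complements and 0xff0 is the shifted one, so Src = y,
// Dst = x, LSB = 4 and Width = 8; the selected code shifts y right by 4 and
// then BFM-inserts its low 8 bits into bits [11:4] of x, matching the OR.
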
bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return false;

  APInt NUsefulBits;
  getUsefulBits(SDValue(N, 0), NUsefulBits);

  // If no bits are useful, just return UNDEF.
  if (!NUsefulBits) {
    CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
    return true;
  }

  if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
    return true;

  return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
}

/// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
/// equivalent of a left shift by a constant amount followed by an and masking
/// out a contiguous set of bits.
bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
  if (N->getOpcode() != ISD::AND)
    return false;

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  SDValue Op0;
  int DstLSB, Width;
  if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
                               Op0, DstLSB, Width))
    return false;

  // ImmR is the rotate right amount.
  unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
  // ImmS is the most significant bit of the source to be moved.
  unsigned ImmS = Width - 1;

  SDLoc DL(N);
  SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}

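// Worked example (expository comment, not part of the upstream file): for
// i32 (and (shl x, 4), 0xff0), isBitfieldPositioningOp reports DstLSB = 4 and
// Width = 8, so ImmR = 28 and ImmS = 7, giving UBFMWri x, 28, 7 -- the
// "ubfiz w0, w1, #4, #8" alias.
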
Tim Northover3b0846e2014-05-24 12:50:23 +00002432bool
2433AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2434 unsigned RegWidth) {
2435 APFloat FVal(0.0);
2436 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2437 FVal = CN->getValueAPF();
2438 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2439 // Some otherwise illegal constants are allowed in this case.
2440 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2441 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2442 return false;
2443
2444 ConstantPoolSDNode *CN =
2445 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2446 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2447 } else
2448 return false;
2449
2450 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2451 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2452 // x-register.
2453 //
2454 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2455 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2456 // integers.
2457 bool IsExact;
2458
2459 // fbits is between 1 and 64 in the worst-case, which means the fmul
2460 // could have 2^64 as an actual operand. Need 65 bits of precision.
2461 APSInt IntVal(65, true);
2462 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2463
2464 // N.b. isPowerOf2 also checks for > 0.
2465 if (!IsExact || !IntVal.isPowerOf2()) return false;
2466 unsigned FBits = IntVal.logBase2();
2467
2468 // Checks above should have guaranteed that we haven't lost information in
2469 // finding FBits, but it must still be in range.
2470 if (FBits == 0 || FBits > RegWidth) return false;
2471
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002472 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
Tim Northover3b0846e2014-05-24 12:50:23 +00002473 return true;
2474}
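// Worked example (illustrative, not from this file): for
//   (fp_to_sint (fmul x, 16.0))
// the multiplier is exactly 2^4, so FixedPos is set to FBits = 4 and the
// caller can fold the whole pattern into a fixed-point conversion such as
//   FCVTZS wd, sn, #4.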
2475
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002476// Inspects a register string of the form op0:op1:CRn:CRm:op2, extracts the
2477// integer values of its five fields, and combines them into the single
2478// immediate value used in the MRS/MSR instruction.
2479static int getIntOperandFromRegisterString(StringRef RegString) {
2480 SmallVector<StringRef, 5> Fields;
Chandler Carruthe4405e92015-09-10 06:12:31 +00002481 RegString.split(Fields, ':');
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002482
2483 if (Fields.size() == 1)
2484 return -1;
2485
2486 assert(Fields.size() == 5
2487 && "Invalid number of fields in read register string");
2488
2489 SmallVector<int, 5> Ops;
2490 bool AllIntFields = true;
2491
2492 for (StringRef Field : Fields) {
2493 unsigned IntField;
2494 AllIntFields &= !Field.getAsInteger(10, IntField);
2495 Ops.push_back(IntField);
2496 }
2497
2498 assert(AllIntFields &&
2499 "Unexpected non-integer value in special register string.");
2500
2501 // Need to combine the integer fields of the string into a single value
2502 // based on the bit encoding of the MRS/MSR instruction.
2503 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2504 (Ops[3] << 3) | (Ops[4]);
2505}
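// Worked example (illustrative, not from this file): the generic string
// "3:3:13:0:2" (TPIDR_EL0) yields
//   (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 == 0xDE82,
// matching the 16-bit op0:op1:CRn:CRm:op2 system-register encoding.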
2506
2507// Lower the read_register intrinsic to an MRS instruction node if the special
2508// register string argument is either of the form detailed in the ACLE (the
2509// form described in getIntOperandFromRegisterString) or is a named register
2510// known by the MRS SysReg mapper.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002511bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002512 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2513 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2514 SDLoc DL(N);
2515
2516 int Reg = getIntOperandFromRegisterString(RegString->getString());
Justin Bogner283e3bd2016-05-12 23:10:30 +00002517 if (Reg != -1) {
2518 ReplaceNode(N, CurDAG->getMachineNode(
2519 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2520 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2521 N->getOperand(0)));
2522 return true;
2523 }
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002524
2525 // Use the sysreg mapper to map the remaining possible strings to the
2526 // value for the register to be used for the instruction operand.
Tim Northovere6ae6762016-07-05 21:23:04 +00002527 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
2528 if (TheReg && TheReg->Readable &&
2529 TheReg->haveFeatures(Subtarget->getFeatureBits()))
2530 Reg = TheReg->Encoding;
2531 else
2532 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
2533
2534 if (Reg != -1) {
Justin Bogner283e3bd2016-05-12 23:10:30 +00002535 ReplaceNode(N, CurDAG->getMachineNode(
2536 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2537 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2538 N->getOperand(0)));
2539 return true;
2540 }
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002541
Justin Bogner283e3bd2016-05-12 23:10:30 +00002542 return false;
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002543}
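// Usage sketch (IR level, illustrative): a call such as
//   %tp = call i64 @llvm.read_register.i64(metadata !{!"tpidr_el0"})
// reaches this selector as READ_REGISTER and, via the sysreg lookup above,
// becomes MRS Xd, TPIDR_EL0.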
2544
2545// Lower the write_register intrinsic to an MSR instruction node if the special
2546// register string argument is either of the form detailed in the ACLE (the
2547// form described in getIntOperandFromRegisterString) or is a named register
2548// known by the MSR SysReg mapper.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002549bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002550 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2551 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2552 SDLoc DL(N);
2553
2554 int Reg = getIntOperandFromRegisterString(RegString->getString());
Justin Bogner283e3bd2016-05-12 23:10:30 +00002555 if (Reg != -1) {
2556 ReplaceNode(
2557 N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002558 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
Justin Bogner283e3bd2016-05-12 23:10:30 +00002559 N->getOperand(2), N->getOperand(0)));
2560 return true;
2561 }
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002562
2563 // Check if the register was one of those allowed as the pstatefield value in
2564 // the MSR (immediate) instruction. To accept the values allowed in the
2565 // pstatefield for the MSR (immediate) instruction, we also require that an
2566 // immediate value has been provided as an argument; we know this is the
2567 // case because semantic checking has already ensured it.
Tim Northovere6ae6762016-07-05 21:23:04 +00002568 auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
2569 if (PMapper) {
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002570 assert(isa<ConstantSDNode>(N->getOperand(2))
2571 && "Expected a constant integer expression.");
Tim Northovere6ae6762016-07-05 21:23:04 +00002572 unsigned Reg = PMapper->Encoding;
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002573 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
Alexandros Lamprineas1bab1912015-10-05 13:42:31 +00002574 unsigned State;
Oliver Stannard911ea202015-11-26 15:32:30 +00002575 if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
Alexandros Lamprineas1bab1912015-10-05 13:42:31 +00002576 assert(Immed < 2 && "Bad imm");
2577 State = AArch64::MSRpstateImm1;
2578 } else {
2579 assert(Immed < 16 && "Bad imm");
2580 State = AArch64::MSRpstateImm4;
2581 }
Justin Bogner283e3bd2016-05-12 23:10:30 +00002582 ReplaceNode(N, CurDAG->getMachineNode(
2583 State, DL, MVT::Other,
2584 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2585 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2586 N->getOperand(0)));
2587 return true;
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002588 }
2589
2590 // Use the sysreg mapper to attempt to map the remaining possible strings
2591 // to the value for the register to be used for the MSR (register)
2592 // instruction operand.
Tim Northovere6ae6762016-07-05 21:23:04 +00002593 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
2594 if (TheReg && TheReg->Writeable &&
2595 TheReg->haveFeatures(Subtarget->getFeatureBits()))
2596 Reg = TheReg->Encoding;
2597 else
2598 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
2599 if (Reg != -1) {
2600 ReplaceNode(N, CurDAG->getMachineNode(
2601 AArch64::MSR, DL, MVT::Other,
2602 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2603 N->getOperand(2), N->getOperand(0)));
Justin Bogner283e3bd2016-05-12 23:10:30 +00002604 return true;
2605 }
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002606
Justin Bogner283e3bd2016-05-12 23:10:30 +00002607 return false;
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002608}
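// Usage sketch (IR level, illustrative): a call such as
//   call void @llvm.write_register.i64(metadata !{!"spsel"}, i64 1)
// hits the PState path above and selects the immediate form
// MSRpstateImm4 (MSR SPSel, #1), while PAN/UAO writes use the 1-bit
// MSRpstateImm1 form.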
2609
Tim Northovercdf15292016-04-14 17:03:29 +00002610/// We have special CMP_SWAP_* pseudo-instructions for these operations.
2611void AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
2612 unsigned Opcode;
2613 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
2614 if (MemTy == MVT::i8)
2615 Opcode = AArch64::CMP_SWAP_8;
2616 else if (MemTy == MVT::i16)
2617 Opcode = AArch64::CMP_SWAP_16;
2618 else if (MemTy == MVT::i32)
2619 Opcode = AArch64::CMP_SWAP_32;
2620 else if (MemTy == MVT::i64)
2621 Opcode = AArch64::CMP_SWAP_64;
2622 else
2623 llvm_unreachable("Unknown AtomicCmpSwap type");
2624
2625 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
2626 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
2627 N->getOperand(0)};
2628 SDNode *CmpSwap = CurDAG->getMachineNode(
2629 Opcode, SDLoc(N),
2630 CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
2631
2632 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2633 MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
2634 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
2635
2636 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
2637 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
Justin Bogner3525da72016-05-12 20:54:27 +00002638 CurDAG->RemoveDeadNode(N);
Tim Northovercdf15292016-04-14 17:03:29 +00002639}
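// For illustration: a 32-bit "cmpxchg" reaching here as ATOMIC_CMP_SWAP
// becomes a CMP_SWAP_32 pseudo taking (addr, expected, new, chain); the
// pseudo is later expanded into a load/store-exclusive retry loop.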
2640
Justin Bogner283e3bd2016-05-12 23:10:30 +00002641void AArch64DAGToDAGISel::Select(SDNode *Node) {
Tim Northover3b0846e2014-05-24 12:50:23 +00002642 // Dump information about the Node being selected
2643 DEBUG(errs() << "Selecting: ");
2644 DEBUG(Node->dump(CurDAG));
2645 DEBUG(errs() << "\n");
2646
2647 // If we have a custom node, we already have selected!
2648 if (Node->isMachineOpcode()) {
2649 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2650 Node->setNodeId(-1);
Justin Bogner283e3bd2016-05-12 23:10:30 +00002651 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002652 }
2653
2654 // A few opcodes need custom selection.
Tim Northover3b0846e2014-05-24 12:50:23 +00002655 EVT VT = Node->getValueType(0);
2656
2657 switch (Node->getOpcode()) {
2658 default:
2659 break;
2660
Tim Northovercdf15292016-04-14 17:03:29 +00002661 case ISD::ATOMIC_CMP_SWAP:
2662 SelectCMP_SWAP(Node);
Quentin Colombet35a47012017-04-01 01:26:17 +00002663 return;
Tim Northovercdf15292016-04-14 17:03:29 +00002664
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002665 case ISD::READ_REGISTER:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002666 if (tryReadRegister(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002667 return;
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002668 break;
2669
2670 case ISD::WRITE_REGISTER:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002671 if (tryWriteRegister(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002672 return;
Luke Cheeseman85fd06d2015-06-01 12:02:47 +00002673 break;
2674
Tim Northover3b0846e2014-05-24 12:50:23 +00002675 case ISD::ADD:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002676 if (tryMLAV64LaneV128(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002677 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002678 break;
2679
2680 case ISD::LOAD: {
2681 // Try to select as an indexed load. Fall through to normal processing
2682 // if we can't.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002683 if (tryIndexedLoad(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002684 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002685 break;
2686 }
2687
2688 case ISD::SRL:
2689 case ISD::AND:
2690 case ISD::SRA:
Chad Rosier2d658702016-06-03 15:00:09 +00002691 case ISD::SIGN_EXTEND_INREG:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002692 if (tryBitfieldExtractOp(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002693 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002694 if (tryBitfieldInsertInZeroOp(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002695 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002696 break;
2697
Chad Rosierbe879ea2016-06-03 20:05:49 +00002698 case ISD::SIGN_EXTEND:
2699 if (tryBitfieldExtractOpFromSExt(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002700 return;
Chad Rosierbe879ea2016-06-03 20:05:49 +00002701 break;
2702
Tim Northover3b0846e2014-05-24 12:50:23 +00002703 case ISD::OR:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002704 if (tryBitfieldInsertOp(Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00002705 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002706 break;
2707
2708 case ISD::EXTRACT_VECTOR_ELT: {
2709 // Extracting lane zero is a special case where we can just use a plain
2710 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2711 // the rest of the compiler, especially the register allocator and copy
2712 // propagation, to reason about, so is preferred when it's possible to
2713 // use it.
2714 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2715 // Bail and use the default Select() for non-zero lanes.
2716 if (LaneNode->getZExtValue() != 0)
2717 break;
2718 // If the element type is not the same as the result type, likewise
2719 // bail and use the default Select(), as there's more to do than just
2720 // a cross-class COPY. This catches extracts of i8 and i16 elements
2721 // since they will need an explicit zext.
2722 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2723 break;
2724 unsigned SubReg;
2725 switch (Node->getOperand(0)
2726 .getValueType()
2727 .getVectorElementType()
2728 .getSizeInBits()) {
2729 default:
Craig Topper2a30d782014-06-18 05:05:13 +00002730 llvm_unreachable("Unexpected vector element type!");
Tim Northover3b0846e2014-05-24 12:50:23 +00002731 case 64:
2732 SubReg = AArch64::dsub;
2733 break;
2734 case 32:
2735 SubReg = AArch64::ssub;
2736 break;
Oliver Stannard89d15422014-08-27 16:16:04 +00002737 case 16:
2738 SubReg = AArch64::hsub;
2739 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00002740 case 8:
2741 llvm_unreachable("unexpected zext-requiring extract element!");
2742 }
2743 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2744 Node->getOperand(0));
2745 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2746 DEBUG(Extract->dumpr(CurDAG));
2747 DEBUG(dbgs() << "\n");
Justin Bogner283e3bd2016-05-12 23:10:30 +00002748 ReplaceNode(Node, Extract.getNode());
Quentin Colombet35a47012017-04-01 01:26:17 +00002749 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002750 }
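  // For illustration: (f64 (extract_vector_elt v2f64:%q, 0)) becomes a plain
  // EXTRACT_SUBREG of %q's dsub, which the register allocator can often fold
  // away entirely instead of emitting a lane move.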
2751 case ISD::Constant: {
2752 // Materialize zero constants as copies from WZR/XZR. This allows
2753 // the coalescer to propagate these into other instructions.
2754 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2755 if (ConstNode->isNullValue()) {
Justin Bogner283e3bd2016-05-12 23:10:30 +00002756 if (VT == MVT::i32) {
2757 SDValue New = CurDAG->getCopyFromReg(
2758 CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
2759 ReplaceNode(Node, New.getNode());
Quentin Colombet35a47012017-04-01 01:26:17 +00002760 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002761 } else if (VT == MVT::i64) {
2762 SDValue New = CurDAG->getCopyFromReg(
2763 CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
2764 ReplaceNode(Node, New.getNode());
Quentin Colombet35a47012017-04-01 01:26:17 +00002765 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002766 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002767 }
2768 break;
2769 }
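  // For illustration: an i64 zero selected here is a CopyFromReg of XZR, so
  // a user such as "str xzr, [sp]" reads the zero register directly and no
  // MOVZ is needed to materialize 0.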
2770
2771 case ISD::FrameIndex: {
2772 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2773 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2774 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2775 const TargetLowering *TLI = getTargetLowering();
Mehdi Amini44ede332015-07-09 02:09:04 +00002776 SDValue TFI = CurDAG->getTargetFrameIndex(
2777 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002778 SDLoc DL(Node);
2779 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2780 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
Justin Bogner283e3bd2016-05-12 23:10:30 +00002781 CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
Quentin Colombet35a47012017-04-01 01:26:17 +00002782 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002783 }
2784 case ISD::INTRINSIC_W_CHAIN: {
2785 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2786 switch (IntNo) {
2787 default:
2788 break;
2789 case Intrinsic::aarch64_ldaxp:
2790 case Intrinsic::aarch64_ldxp: {
2791 unsigned Op =
2792 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2793 SDValue MemAddr = Node->getOperand(2);
2794 SDLoc DL(Node);
2795 SDValue Chain = Node->getOperand(0);
2796
2797 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2798 MVT::Other, MemAddr, Chain);
2799
2800 // Transfer memoperands.
2801 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2802 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2803 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
Justin Bogner283e3bd2016-05-12 23:10:30 +00002804 ReplaceNode(Node, Ld);
Quentin Colombet35a47012017-04-01 01:26:17 +00002805 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002806 }
2807 case Intrinsic::aarch64_stlxp:
2808 case Intrinsic::aarch64_stxp: {
2809 unsigned Op =
2810 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2811 SDLoc DL(Node);
2812 SDValue Chain = Node->getOperand(0);
2813 SDValue ValLo = Node->getOperand(2);
2814 SDValue ValHi = Node->getOperand(3);
2815 SDValue MemAddr = Node->getOperand(4);
2816
2817 // Place arguments in the right order.
Benjamin Kramerea68a942015-02-19 15:26:17 +00002818 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
Tim Northover3b0846e2014-05-24 12:50:23 +00002819
2820 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2821 // Transfer memoperands.
2822 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2823 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2824 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2825
Justin Bogner283e3bd2016-05-12 23:10:30 +00002826 ReplaceNode(Node, St);
Quentin Colombet35a47012017-04-01 01:26:17 +00002827 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002828 }
2829 case Intrinsic::aarch64_neon_ld1x2:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002830 if (VT == MVT::v8i8) {
2831 SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002832 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002833 } else if (VT == MVT::v16i8) {
2834 SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002835 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002836 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2837 SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002838 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002839 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
2840 SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002841 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002842 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
2843 SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002844 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002845 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
2846 SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002847 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002848 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
2849 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002850 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002851 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
2852 SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002853 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002854 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002855 break;
2856 case Intrinsic::aarch64_neon_ld1x3:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002857 if (VT == MVT::v8i8) {
2858 SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002859 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002860 } else if (VT == MVT::v16i8) {
2861 SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002862 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002863 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2864 SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002865 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002866 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
2867 SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002868 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002869 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
2870 SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002871 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002872 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
2873 SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002874 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002875 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
2876 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002877 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002878 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
2879 SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002880 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002881 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002882 break;
2883 case Intrinsic::aarch64_neon_ld1x4:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002884 if (VT == MVT::v8i8) {
2885 SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002886 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002887 } else if (VT == MVT::v16i8) {
2888 SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002889 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002890 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2891 SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002892 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002893 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
2894 SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002895 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002896 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
2897 SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002898 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002899 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
2900 SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002901 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002902 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
2903 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002904 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002905 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
2906 SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002907 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002908 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002909 break;
2910 case Intrinsic::aarch64_neon_ld2:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002911 if (VT == MVT::v8i8) {
2912 SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002913 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002914 } else if (VT == MVT::v16i8) {
2915 SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002916 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002917 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2918 SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002919 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002920 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
2921 SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002922 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002923 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
2924 SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002925 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002926 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
2927 SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002928 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002929 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
2930 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002931 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002932 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
2933 SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002934 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002935 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002936 break;
2937 case Intrinsic::aarch64_neon_ld3:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002938 if (VT == MVT::v8i8) {
2939 SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002940 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002941 } else if (VT == MVT::v16i8) {
2942 SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002943 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002944 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2945 SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002946 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002947 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
2948 SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002949 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002950 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
2951 SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002952 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002953 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
2954 SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002955 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002956 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
2957 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002958 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002959 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
2960 SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002961 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002962 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002963 break;
2964 case Intrinsic::aarch64_neon_ld4:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002965 if (VT == MVT::v8i8) {
2966 SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002967 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002968 } else if (VT == MVT::v16i8) {
2969 SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002970 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002971 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2972 SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002973 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002974 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
2975 SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002976 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002977 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
2978 SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002979 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002980 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
2981 SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002982 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002983 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
2984 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002985 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002986 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
2987 SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002988 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002989 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002990 break;
2991 case Intrinsic::aarch64_neon_ld2r:
Justin Bogner283e3bd2016-05-12 23:10:30 +00002992 if (VT == MVT::v8i8) {
2993 SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002994 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002995 } else if (VT == MVT::v16i8) {
2996 SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00002997 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002998 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
2999 SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003000 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003001 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3002 SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003003 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003004 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3005 SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003006 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003007 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3008 SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003009 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003010 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3011 SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003012 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003013 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3014 SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003015 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003016 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003017 break;
3018 case Intrinsic::aarch64_neon_ld3r:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003019 if (VT == MVT::v8i8) {
3020 SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003021 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003022 } else if (VT == MVT::v16i8) {
3023 SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003024 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003025 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3026 SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003027 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003028 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3029 SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003030 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003031 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3032 SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003033 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003034 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3035 SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003036 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003037 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3038 SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003039 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003040 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3041 SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003042 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003043 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003044 break;
3045 case Intrinsic::aarch64_neon_ld4r:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003046 if (VT == MVT::v8i8) {
3047 SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003048 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003049 } else if (VT == MVT::v16i8) {
3050 SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003051 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003052 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3053 SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003054 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003055 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3056 SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003057 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003058 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3059 SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003060 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003061 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3062 SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003063 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003064 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3065 SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003066 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003067 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3068 SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003069 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003070 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003071 break;
3072 case Intrinsic::aarch64_neon_ld2lane:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003073 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3074 SelectLoadLane(Node, 2, AArch64::LD2i8);
Quentin Colombet35a47012017-04-01 01:26:17 +00003075 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003076 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3077 VT == MVT::v8f16) {
3078 SelectLoadLane(Node, 2, AArch64::LD2i16);
Quentin Colombet35a47012017-04-01 01:26:17 +00003079 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003080 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3081 VT == MVT::v2f32) {
3082 SelectLoadLane(Node, 2, AArch64::LD2i32);
Quentin Colombet35a47012017-04-01 01:26:17 +00003083 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003084 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3085 VT == MVT::v1f64) {
3086 SelectLoadLane(Node, 2, AArch64::LD2i64);
Quentin Colombet35a47012017-04-01 01:26:17 +00003087 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003088 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003089 break;
3090 case Intrinsic::aarch64_neon_ld3lane:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003091 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3092 SelectLoadLane(Node, 3, AArch64::LD3i8);
Quentin Colombet35a47012017-04-01 01:26:17 +00003093 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003094 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3095 VT == MVT::v8f16) {
3096 SelectLoadLane(Node, 3, AArch64::LD3i16);
Quentin Colombet35a47012017-04-01 01:26:17 +00003097 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003098 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3099 VT == MVT::v2f32) {
3100 SelectLoadLane(Node, 3, AArch64::LD3i32);
Quentin Colombet35a47012017-04-01 01:26:17 +00003101 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003102 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3103 VT == MVT::v1f64) {
3104 SelectLoadLane(Node, 3, AArch64::LD3i64);
Quentin Colombet35a47012017-04-01 01:26:17 +00003105 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003106 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003107 break;
3108 case Intrinsic::aarch64_neon_ld4lane:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003109 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3110 SelectLoadLane(Node, 4, AArch64::LD4i8);
Quentin Colombet35a47012017-04-01 01:26:17 +00003111 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003112 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3113 VT == MVT::v8f16) {
3114 SelectLoadLane(Node, 4, AArch64::LD4i16);
Quentin Colombet35a47012017-04-01 01:26:17 +00003115 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003116 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3117 VT == MVT::v2f32) {
3118 SelectLoadLane(Node, 4, AArch64::LD4i32);
Quentin Colombet35a47012017-04-01 01:26:17 +00003119 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003120 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3121 VT == MVT::v1f64) {
3122 SelectLoadLane(Node, 4, AArch64::LD4i64);
Quentin Colombet35a47012017-04-01 01:26:17 +00003123 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003124 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003125 break;
3126 }
3127 } break;
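  // For illustration: @llvm.aarch64.neon.ld2.v4i32 reaching here selects
  // LD2Twov4s with qsub0 as the first result sub-register, while the v1i64
  // forms deliberately fall back to LD1Twov1d, since LD2 has no .1d variant.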
3128 case ISD::INTRINSIC_WO_CHAIN: {
3129 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
3130 switch (IntNo) {
3131 default:
3132 break;
3133 case Intrinsic::aarch64_neon_tbl2:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003134 SelectTable(Node, 2,
3135 VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
3136 false);
Quentin Colombet35a47012017-04-01 01:26:17 +00003137 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003138 case Intrinsic::aarch64_neon_tbl3:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003139 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
3140 : AArch64::TBLv16i8Three,
3141 false);
Quentin Colombet35a47012017-04-01 01:26:17 +00003142 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003143 case Intrinsic::aarch64_neon_tbl4:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003144 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
3145 : AArch64::TBLv16i8Four,
3146 false);
Quentin Colombet35a47012017-04-01 01:26:17 +00003147 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003148 case Intrinsic::aarch64_neon_tbx2:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003149 SelectTable(Node, 2,
3150 VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
3151 true);
Quentin Colombet35a47012017-04-01 01:26:17 +00003152 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003153 case Intrinsic::aarch64_neon_tbx3:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003154 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
3155 : AArch64::TBXv16i8Three,
3156 true);
Quentin Colombet35a47012017-04-01 01:26:17 +00003157 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003158 case Intrinsic::aarch64_neon_tbx4:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003159 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
3160 : AArch64::TBXv16i8Four,
3161 true);
Quentin Colombet35a47012017-04-01 01:26:17 +00003162 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003163 case Intrinsic::aarch64_neon_smull:
3164 case Intrinsic::aarch64_neon_umull:
Justin Bogner283e3bd2016-05-12 23:10:30 +00003165 if (tryMULLV64LaneV128(IntNo, Node))
Quentin Colombet35a47012017-04-01 01:26:17 +00003166 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00003167 break;
3168 }
3169 break;
3170 }
3171 case ISD::INTRINSIC_VOID: {
3172 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
3173 if (Node->getNumOperands() >= 3)
3174 VT = Node->getOperand(2)->getValueType(0);
3175 switch (IntNo) {
3176 default:
3177 break;
3178 case Intrinsic::aarch64_neon_st1x2: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003179 if (VT == MVT::v8i8) {
3180 SelectStore(Node, 2, AArch64::ST1Twov8b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003181 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003182 } else if (VT == MVT::v16i8) {
3183 SelectStore(Node, 2, AArch64::ST1Twov16b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003184 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003185 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3186 SelectStore(Node, 2, AArch64::ST1Twov4h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003187 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003188 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3189 SelectStore(Node, 2, AArch64::ST1Twov8h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003190 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003191 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3192 SelectStore(Node, 2, AArch64::ST1Twov2s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003193 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003194 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3195 SelectStore(Node, 2, AArch64::ST1Twov4s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003196 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003197 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3198 SelectStore(Node, 2, AArch64::ST1Twov2d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003199 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003200 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3201 SelectStore(Node, 2, AArch64::ST1Twov1d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003202 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003203 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003204 break;
3205 }
3206 case Intrinsic::aarch64_neon_st1x3: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003207 if (VT == MVT::v8i8) {
3208 SelectStore(Node, 3, AArch64::ST1Threev8b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003209 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003210 } else if (VT == MVT::v16i8) {
3211 SelectStore(Node, 3, AArch64::ST1Threev16b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003212 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003213 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3214 SelectStore(Node, 3, AArch64::ST1Threev4h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003215 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003216 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3217 SelectStore(Node, 3, AArch64::ST1Threev8h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003218 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003219 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3220 SelectStore(Node, 3, AArch64::ST1Threev2s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003221 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003222 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3223 SelectStore(Node, 3, AArch64::ST1Threev4s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003224 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003225 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3226 SelectStore(Node, 3, AArch64::ST1Threev2d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003227 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003228 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3229 SelectStore(Node, 3, AArch64::ST1Threev1d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003230 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003231 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003232 break;
3233 }
3234 case Intrinsic::aarch64_neon_st1x4: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003235 if (VT == MVT::v8i8) {
3236 SelectStore(Node, 4, AArch64::ST1Fourv8b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003237 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003238 } else if (VT == MVT::v16i8) {
3239 SelectStore(Node, 4, AArch64::ST1Fourv16b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003240 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003241 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3242 SelectStore(Node, 4, AArch64::ST1Fourv4h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003243 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003244 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3245 SelectStore(Node, 4, AArch64::ST1Fourv8h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003246 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003247 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3248 SelectStore(Node, 4, AArch64::ST1Fourv2s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003249 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003250 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3251 SelectStore(Node, 4, AArch64::ST1Fourv4s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003252 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003253 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3254 SelectStore(Node, 4, AArch64::ST1Fourv2d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003255 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003256 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3257 SelectStore(Node, 4, AArch64::ST1Fourv1d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003258 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003259 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003260 break;
3261 }
3262 case Intrinsic::aarch64_neon_st2: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003263 if (VT == MVT::v8i8) {
3264 SelectStore(Node, 2, AArch64::ST2Twov8b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003265 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003266 } else if (VT == MVT::v16i8) {
3267 SelectStore(Node, 2, AArch64::ST2Twov16b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003268 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003269 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3270 SelectStore(Node, 2, AArch64::ST2Twov4h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003271 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003272 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3273 SelectStore(Node, 2, AArch64::ST2Twov8h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003274 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003275 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3276 SelectStore(Node, 2, AArch64::ST2Twov2s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003277 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003278 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3279 SelectStore(Node, 2, AArch64::ST2Twov4s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003280 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003281 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3282 SelectStore(Node, 2, AArch64::ST2Twov2d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003283 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003284 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3285 SelectStore(Node, 2, AArch64::ST1Twov1d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003286 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003287 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003288 break;
3289 }
3290 case Intrinsic::aarch64_neon_st3: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003291 if (VT == MVT::v8i8) {
3292 SelectStore(Node, 3, AArch64::ST3Threev8b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003293 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003294 } else if (VT == MVT::v16i8) {
3295 SelectStore(Node, 3, AArch64::ST3Threev16b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003296 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003297 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3298 SelectStore(Node, 3, AArch64::ST3Threev4h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003299 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003300 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3301 SelectStore(Node, 3, AArch64::ST3Threev8h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003302 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003303 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3304 SelectStore(Node, 3, AArch64::ST3Threev2s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003305 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003306 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3307 SelectStore(Node, 3, AArch64::ST3Threev4s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003308 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003309 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3310 SelectStore(Node, 3, AArch64::ST3Threev2d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003311 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003312 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3313 SelectStore(Node, 3, AArch64::ST1Threev1d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003314 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003315 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003316 break;
3317 }
3318 case Intrinsic::aarch64_neon_st4: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003319 if (VT == MVT::v8i8) {
3320 SelectStore(Node, 4, AArch64::ST4Fourv8b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003321 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003322 } else if (VT == MVT::v16i8) {
3323 SelectStore(Node, 4, AArch64::ST4Fourv16b);
Quentin Colombet35a47012017-04-01 01:26:17 +00003324 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003325 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
3326 SelectStore(Node, 4, AArch64::ST4Fourv4h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003327 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003328 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3329 SelectStore(Node, 4, AArch64::ST4Fourv8h);
Quentin Colombet35a47012017-04-01 01:26:17 +00003330 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003331 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3332 SelectStore(Node, 4, AArch64::ST4Fourv2s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003333 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003334 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3335 SelectStore(Node, 4, AArch64::ST4Fourv4s);
Quentin Colombet35a47012017-04-01 01:26:17 +00003336 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003337 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3338 SelectStore(Node, 4, AArch64::ST4Fourv2d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003339 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003340 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3341 SelectStore(Node, 4, AArch64::ST1Fourv1d);
Quentin Colombet35a47012017-04-01 01:26:17 +00003342 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003343 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003344 break;
3345 }
3346 case Intrinsic::aarch64_neon_st2lane: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003347 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3348 SelectStoreLane(Node, 2, AArch64::ST2i8);
Quentin Colombet35a47012017-04-01 01:26:17 +00003349 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003350 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3351 VT == MVT::v8f16) {
3352 SelectStoreLane(Node, 2, AArch64::ST2i16);
Quentin Colombet35a47012017-04-01 01:26:17 +00003353 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003354 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3355 VT == MVT::v2f32) {
3356 SelectStoreLane(Node, 2, AArch64::ST2i32);
Quentin Colombet35a47012017-04-01 01:26:17 +00003357 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003358 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3359 VT == MVT::v1f64) {
3360 SelectStoreLane(Node, 2, AArch64::ST2i64);
Quentin Colombet35a47012017-04-01 01:26:17 +00003361 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003362 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003363 break;
3364 }
3365 case Intrinsic::aarch64_neon_st3lane: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003366 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3367 SelectStoreLane(Node, 3, AArch64::ST3i8);
Quentin Colombet35a47012017-04-01 01:26:17 +00003368 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003369 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3370 VT == MVT::v8f16) {
3371 SelectStoreLane(Node, 3, AArch64::ST3i16);
Quentin Colombet35a47012017-04-01 01:26:17 +00003372 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003373 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3374 VT == MVT::v2f32) {
3375 SelectStoreLane(Node, 3, AArch64::ST3i32);
Quentin Colombet35a47012017-04-01 01:26:17 +00003376 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003377 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3378 VT == MVT::v1f64) {
3379 SelectStoreLane(Node, 3, AArch64::ST3i64);
Quentin Colombet35a47012017-04-01 01:26:17 +00003380 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003381 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003382 break;
3383 }
3384 case Intrinsic::aarch64_neon_st4lane: {
Justin Bogner283e3bd2016-05-12 23:10:30 +00003385 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3386 SelectStoreLane(Node, 4, AArch64::ST4i8);
Quentin Colombet35a47012017-04-01 01:26:17 +00003387 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003388 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3389 VT == MVT::v8f16) {
3390 SelectStoreLane(Node, 4, AArch64::ST4i16);
Quentin Colombet35a47012017-04-01 01:26:17 +00003391 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003392 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3393 VT == MVT::v2f32) {
3394 SelectStoreLane(Node, 4, AArch64::ST4i32);
Quentin Colombet35a47012017-04-01 01:26:17 +00003395 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003396 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3397 VT == MVT::v1f64) {
3398 SelectStoreLane(Node, 4, AArch64::ST4i64);
Quentin Colombet35a47012017-04-01 01:26:17 +00003399 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003400 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003401 break;
3402 }
3403 }
Mehdi Aminia7583982015-08-23 00:42:57 +00003404 break;
Tim Northover3b0846e2014-05-24 12:50:23 +00003405 }
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
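  // Post-indexed LD1 of two, three, or four consecutive registers: the same
  // writeback mechanics as LDn above, but the registers are loaded
  // back-to-back instead of being de-interleaved.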
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
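  // Post-indexed load-and-replicate (LDnR): load one element per result
  // register and broadcast it to every lane, then write the incremented base
  // address back.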
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
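  // Post-indexed single-lane loads (LD1-LD4 into one lane): the lane
  // instructions always address a 128-bit register, so only the element size
  // picks the opcode and the 64-bit and 128-bit vector types share a case.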
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
      return;
    }
    break;
  }
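  // Post-indexed structured stores (ST2/ST3/ST4 with writeback). A store
  // produces no register tuple to split, so no sub-register index is needed,
  // and the vector type is taken from operand 1 (the stored value) rather
  // than from the node's result.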
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    }
    break;
  }
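  // Post-indexed ST1 of two, three, or four consecutive registers: the
  // store-side counterpart of the ld1x2/ld1x3/ld1x4 cases above.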
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
      return;
    }
    break;
  }
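  // Post-indexed single-lane stores (ST2-ST4 from one lane): as with the
  // lane loads, only the element size selects the opcode.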
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
      return;
    }
    break;
  }
  }

  // No special handling matched, so let the TableGen-generated matcher
  // select the default instruction.
  SelectCode(Node);
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
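
// How the factory above is typically wired into the backend: a sketch of the
// usual TargetPassConfig pattern. The actual hook lives in
// AArch64TargetMachine.cpp; the method body shown here is illustrative, not
// quoted from that file.
//
//   bool AArch64PassConfig::addInstSelector() {
//     // Run SelectionDAG instruction selection for AArch64.
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     return false;
//   }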