//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
        ForCodeSize(false) {}

  StringRef getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction()->optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }


  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  void SelectCMP_SWAP(SDNode *N);

};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so Imm will receive the 32-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}
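
// Illustrative examples (added commentary, not from the upstream source):
// for a 32-bit add, #0xabc matches with Shift = LSL #0, and #0xabc000
// matches with Val = 0xabc and Shift = LSL #12, giving
//   add w0, w1, #0xabc000
// whereas #0xabc001 fits neither form, so it must be materialized into a
// register first.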

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}
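
// Illustrative example (added commentary, not from the upstream source):
// for an i32 compare against -16, Immed is two's-complement negated to 16,
// which SelectArithImmed accepts, so
//   cmp w0, #-16
// can be selected as the equivalent
//   cmn w0, #16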

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding logical shift of up to three places.
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->uses())
    if (!isa<MemSDNode>(*UI))
      for (SDNode *UII : UI->uses())
        if (!isa<MemSDNode>(*UII))
          return false;
  return true;
}
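
// Illustrative example (added commentary, not from the upstream source):
// folding the shift of (add x1, (shl x2, 3)) into the address turns a
// separate shift-and-add into a single scaled access such as
//   ldr x0, [x1, x2, lsl #3]
// A shift of more than three places cannot be expressed this way, so it is
// never folded.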

/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (ForCodeSize || V.hasOneUse())
    return true;
  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the addressing mode and save a cycle.
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
      isWorthFoldingSHL(V))
    return true;
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
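
// Illustrative example (added commentary, not from the upstream source):
// (sub w1, (shl w2, 4)) folds the shift into the second operand and selects
//   sub w0, w1, w2, lsl #4
// while (xor w1, (rotr w2, 8)) may fold to "eor w0, w1, w2, ror #8", since
// logical instructions are selected with AllowROR = true.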

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}
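
// Illustrative example (added commentary, not from the upstream source):
// when the multiplicand is a dup of lane 3 of a 128-bit v4i32 register,
// instead of extracting the high half into a separate D register and
// multiplying, this selects the indexed form directly:
//   mla v0.2s, v1.2s, v2.s[3]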

bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}

/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if free 32-bit -> 64-bit zext can be used instead.
    if (Ext == AArch64_AM::UXTW &&
        Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}
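
// Illustrative example (added commentary, not from the upstream source):
// for (add x1, (shl (zext w2 to i64), 2)) both the extend and the shift
// fold into the second operand, selecting
//   add x0, x1, w2, uxtw #2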

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // selected here doesn't support labels/immediates, only base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
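
// Illustrative example (added commentary, not from the upstream source):
// this mode feeds the paired load/store instructions. With Size = 8 the
// byte offset must be a multiple of 8 in [-512, 504], so
//   stp x1, x2, [x0, #-512]
// encodes directly, while an offset of #512 takes the base-only fallback
// above.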

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getValueType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
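
// Illustrative example (added commentary, not from the upstream source):
// with Size = 8 the scaled unsigned 12-bit form covers byte offsets that
// are multiples of 8 in [0, 32760], so
//   ldr x0, [x1, #32760]
// matches here, whereas a negative or unaligned offset is left to the
// unscaled (LDUR) form instead.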

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}
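
// Illustrative example (added commentary, not from the upstream source):
// a byte offset of -8 is invalid for the scaled 12-bit form but fits the
// signed 9-bit range [-256, 255], so it selects the unscaled variant:
//   ldur x0, [x1, #-8]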

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}
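
// Illustrative examples (added commentary, not from the upstream source):
//   0xfff    -> true:  encodable in a plain ADD.
//   0x123000 -> true:  encodable as "ADD ..., #0x123, LSL #12" but not as a
//                      single MOVZ.
//   0x230000 -> false: a single "MOVZ ..., #0x23, LSL #16" is preferred.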

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. In that case the [BaseReg + 0] address mode would be used,
  // generating instructions like:
  //   MOV  X0, WideImmediate
  //   ADD  X1, BaseReg, X0
  //   LDR  X2, [X1, 0]
  // For such situations, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //   MOV  X0, WideImmediate
  //   LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode, and also immediates that can be encoded by a single ADD (SUB is
    // also checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
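
// Illustrative example (added commentary, not from the upstream source):
// for a two-vector TBL, createQTuple on the two source values builds
//   REG_SEQUENCE QQRegClassID, v0, qsub0, v1, qsub1
// which forces the register allocator to place the vectors in consecutive
// Q registers, as the vector-list operand of TBL requires.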

void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}

bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return false;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}
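
// Illustrative example (added commentary, not from the upstream source):
// a pre-indexed i64 load whose LoadSDNode carries offset #16 selects
// LDRXpre, i.e.
//   ldr x0, [x1, #16]!
// which yields both the loaded value and the updated base register.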
1180
void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
        CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
          CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  ReplaceNode(N, St);
}

namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
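/// e.g. (illustrative): a v2f32 value held in a D register becomes the v4f32
/// value whose dsub subregister is that D register; the high 64 bits are left
/// undefined via IMPLICIT_DEF.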
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                    AArch64::qsub2, AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64), // Lane Number
                   N->getOperand(NumVecs + 2),          // Base register
                   N->getOperand(NumVecs + 3),          // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                      AArch64::qsub2, AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be an AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching the bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t AndImm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  AndImm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
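  // (e.g., 0x0f & 0x10 == 0, whereas a non-mask value such as 0x18 gives
  // 0x18 & 0x19 != 0).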
  if (AndImm & (AndImm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t SrlImm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   SrlImm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one,
    // and it may expose more opportunities for the bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = SrlImm;
  MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
                                 : countTrailingOnes<uint64_t>(AndImm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead
    // of the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}

static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
                                             SDValue &Opd0, unsigned &Immr,
                                             unsigned &Imms) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT VT = N->getValueType(0);
  unsigned BitWidth = VT.getSizeInBits();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  SDValue Op = N->getOperand(0);
  if (Op->getOpcode() == ISD::TRUNCATE) {
    Op = Op->getOperand(0);
    VT = Op->getValueType(0);
    BitWidth = VT.getSizeInBits();
  }

  uint64_t ShiftImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
      !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
  if (ShiftImm + Width > BitWidth)
    return false;

  Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
  Opd0 = Op.getOperand(0);
  Immr = ShiftImm;
  Imms = ShiftImm + Width - 1;
  return true;
}

static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern, which basically extracts
  // several contiguous bits from the source value and places them at the LSB
  // of the destination value; all other bits of the destination value are set
  // to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // with MaskImm >> ShiftImm determining the bit width.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, BitWide + SrlImm - 1
  //
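  // e.g. (illustrative): SRL (AND Value, 0xff0), #4 extracts bits 11:4 of
  // Value (BitWide == 8, SrlImm == 4) and selects to UBFM Value, #4, #11.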

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t AndMask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  // Check whether we really have a several-bits extract here.
  unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
  if (BitWide && isMask_64(AndMask >> SrlImm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = SrlImm;
    MSB = BitWide + SrlImm - 1;
    return true;
  }

  return false;
}

static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing a several-bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
    return true;

  // We're looking for a shift of a shift.
  uint64_t ShlImm = 0;
  uint64_t TruncBits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting the high 32 bits to zero. Our strategy here is
    // to always generate a 64-bit UBFM. This consistency will help the CSE
    // pass later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Missing combines/constant folding may have left us with strange
  // constants.
  if (ShlImm >= VT.getSizeInBits()) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  int immr = SrlImm - ShlImm;
  Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
  Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}

bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND);

  EVT VT = N->getValueType(0);
  EVT NarrowVT = N->getOperand(0)->getValueType(0);
  if (VT != MVT::i64 || NarrowVT != MVT::i32)
    return false;

  uint64_t ShiftImm;
  SDValue Op = N->getOperand(0);
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  SDLoc dl(N);
  // Extend the incoming operand of the shift to 64-bits.
  SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
  unsigned Immr = ShiftImm;
  unsigned Imms = NarrowVT.getSizeInBits() - 1;
  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
  return true;
}

static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);

  case ISD::SIGN_EXTEND_INREG:
    return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}

bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
  unsigned Opc, Immr, Imms;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
    return false;

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // If the bit extract operation is 64-bit but the original type is 32-bit,
  // we need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
                       CurDAG->getTargetConstant(Imms, dl, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                          MVT::i32, SDValue(BFM, 0), SubReg));
    return true;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
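/// e.g. (illustrative): with a 32-bit VT and no ignored high bits,
/// DstMask == 0xffff0000 pairs with inserted bits 0x0000ffff: the two masks
/// are disjoint and together cover all 32 bits, so this returns true.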
static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}

// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7, and they live on through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}

static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // inherit the bitwidth value
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits <<= MSB - Imm + 1;
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument
    OpUsefulBits <<= Imm;
  } else {
    OpUsefulBits <<= MSB + 1;
    --OpUsefulBits;
    // The interesting part will be shifted in the result
    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument
    OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask <<= ShiftAmt;
    getUsefulBits(Op, Mask, Depth + 1);
    Mask.lshrInPlace(ShiftAmt);
  } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
    // Shift Right
    // We do not handle AArch64_AM::ASR, because the sign will change the
    // number of useful bits
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask.lshrInPlace(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask <<= ShiftAmt;
  } else
    return;

  UsefulBits &= Mask;
}

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
  ResultUsefulBits.flipAllBits();
  APInt Mask(UsefulBits.getBitWidth(), 0);

  getUsefulBits(Op, ResultUsefulBits, Depth + 1);

  if (MSB >= Imm) {
    // The instruction is a BFXIL.
    uint64_t Width = MSB - Imm + 1;
    uint64_t LSB = Imm;

    OpUsefulBits <<= Width;
    --OpUsefulBits;

    if (Op.getOperand(1) == Orig) {
      // Copy the low bits from the result to bits starting from LSB.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask <<= LSB;
    }

    if (Op.getOperand(0) == Orig)
      // Bits starting from LSB in the input contribute to the result.
      Mask |= (ResultUsefulBits & ~OpUsefulBits);
  } else {
    // The instruction is a BFI.
    uint64_t Width = MSB + 1;
    uint64_t LSB = UsefulBits.getBitWidth() - Imm;

    OpUsefulBits <<= Width;
    --OpUsefulBits;
    OpUsefulBits <<= LSB;

    if (Op.getOperand(1) == Orig) {
      // Copy the bits from the result to the zero bits.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask.lshrInPlace(LSB);
    }

    if (Op.getOperand(0) == Orig)
      Mask |= (ResultUsefulBits & ~OpUsefulBits);
  }

  UsefulBits &= Mask;
}

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
  case AArch64::ANDWri:
  case AArch64::ANDXri:
    // We increment Depth only when we call getUsefulBits
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::UBFMWri:
  case AArch64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::BFMWri:
  case AArch64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);

  case AArch64::STRBBui:
  case AArch64::STURBBi:
    if (UserNode->getOperand(0) != Orig)
      return;
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
    return;

  case AArch64::STRHHui:
  case AArch64::STURHHi:
    if (UserNode->getOperand(0) != Orig)
      return;
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
    return;
  }
}

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits
  if (!Depth) {
    unsigned Bitwidth = Op.getScalarValueSizeInBits();
    // At the beginning, assume every produced bit is useful
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point
  UsefulBits &= UsersUsefulBits;
}

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
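/// e.g. (illustrative, 32-bit): getLeftShift(Op, 3) emits UBFM Op, #29, #28
/// (an LSL by 3), while getLeftShift(Op, -3) emits UBFM Op, #3, #31 (an LSR
/// by 3), following the two aliasing rules noted in the body below.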
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
  if (ShlAmount == 0)
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;

  SDNode *ShiftNode;
  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
  } else {
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
  }

  return SDValue(ShiftNode, 0);
}

/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)".
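/// e.g. (illustrative): (and (shl X, 2), 0x3c) positions a 4-bit field:
/// Src is X, ShiftAmount is 2 and MaskWidth is 4.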
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    bool BiggerPattern,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  (void)BitWidth;
  assert(BitWidth == 32 || BitWidth == 64);

  KnownBits Known;
  CurDAG->computeKnownBits(Op, Known);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value
  uint64_t NonZeroBits = (~Known.Zero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
    Op = Op.getOperand(0);
  }

  // Don't match if the SHL has more than one use, since then we'll end up
  // generating SHL+UBFIZ instead of just keeping SHL+AND.
  if (!BiggerPattern && !Op.hasOneUse())
    return false;

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an
  // extra LSL/LSR if the mask in NonZeroBits doesn't quite match up with the
  // ISD::SHL amount. BiggerPattern is true when this pattern is being matched
  // for BFI, BiggerPattern is false when this pattern is being matched for
  // UBFIZ, in which case it is not profitable to insert an extra shift.
  if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
    return false;
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);

  return true;
}

static bool isShiftedMask(uint64_t Mask, EVT VT) {
  assert(VT == MVT::i32 || VT == MVT::i64);
  if (VT == MVT::i32)
    return isShiftedMask_32(Mask);
  return isShiftedMask_64(Mask);
}

// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
// inserted only sets known zero bits.
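// e.g. (illustrative, 32-bit): 'or (and X, 0xffff0fff), 0x5000' becomes
// MOV tmp, #5 followed by BFI X, tmp, #12, #4 (LSB == 12, Width == 4,
// BFIImm == 5), since the AND proves bits 15:12 of X are zero and 0x5000
// only sets bits inside that range.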
static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  unsigned BitWidth = VT.getSizeInBits();

  uint64_t OrImm;
  if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
    return false;

  // Skip this transformation if the ORR immediate can be encoded in the ORR.
  // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
  // performance neutral.
  if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
    return false;

  uint64_t MaskImm;
  SDValue And = N->getOperand(0);
  // Must be a single use AND with an immediate operand.
  if (!And.hasOneUse() ||
      !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
    return false;

  // Compute the known zero bits for the AND as this allows us to catch more
  // general cases than just looking for AND with imm.
  KnownBits Known;
  CurDAG->computeKnownBits(And, Known);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NotKnownZero = (~Known.Zero).getZExtValue();

  // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
  if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
    return false;

  // The bits being inserted must only set those bits that are known to be
  // zero.
  if ((OrImm & NotKnownZero) != 0) {
    // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
    // currently handle this case.
    return false;
  }

  // BFI/BFXIL dst, src, #lsb, #width.
  int LSB = countTrailingOnes(NotKnownZero);
  int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();

  // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
  unsigned ImmR = (BitWidth - LSB) % BitWidth;
  unsigned ImmS = Width - 1;

  // If we're creating a BFI instruction, avoid cases where we need more
  // instructions to materialize the BFI constant as compared to the original
  // ORR. A BFXIL will use the same constant as the original ORR, so the code
  // should be no worse in this case.
  bool IsBFI = LSB != 0;
  uint64_t BFIImm = OrImm >> LSB;
  if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
    // We have a BFI instruction and we know the constant can't be
    // materialized with an ORR-immediate with the zero register.
    unsigned OrChunks = 0, BFIChunks = 0;
    for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
      if (((OrImm >> Shift) & 0xFFFF) != 0)
        ++OrChunks;
      if (((BFIImm >> Shift) & 0xFFFF) != 0)
        ++BFIChunks;
    }
    if (BFIChunks > OrChunks)
      return false;
  }

  // Materialize the constant to be inserted.
  SDLoc DL(N);
  unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
  SDNode *MOVI = CurDAG->getMachineNode(
      MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));

  // Create the BFI/BFXIL instruction.
  SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
                   CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}

static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
                                      SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  unsigned BitWidth = VT.getSizeInBits();

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // Given an OR operation, check if we have the following pattern
  // ubfm c, b, imm, imm2 (or something that does the same job, see
  // isBitfieldExtractOp)
  // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
  // countTrailingZeros(mask2) == imm2 - imm + 1
  // f = d | c
  // if yes, replace the OR instruction with:
  // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2

  // OR is commutative, check all combinations of operand order and values of
  // BiggerPattern, i.e.
  // Opd0, Opd1, BiggerPattern=false
  // Opd1, Opd0, BiggerPattern=false
  // Opd0, Opd1, BiggerPattern=true
  // Opd1, Opd0, BiggerPattern=true
  // Several of these combinations may match, so check with BiggerPattern=false
  // first since that will produce better results by matching more instructions
  // and/or inserting fewer extra instructions.
  for (int I = 0; I < 4; ++I) {

    SDValue Dst, Src;
    unsigned ImmR, ImmS;
    bool BiggerPattern = I / 2;
    SDValue OrOpd0Val = N->getOperand(I % 2);
    SDNode *OrOpd0 = OrOpd0Val.getNode();
    SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
    SDNode *OrOpd1 = OrOpd1Val.getNode();

    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, BiggerPattern)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S)
      if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion; we may
      // want to widen the pattern if we want to grab the general bitfield
      // move case
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
                                       BiggerPattern,
                                       Src, DstLSB, Width)) {
      ImmR = (BitWidth - DstLSB) % BitWidth;
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern
    EVT VT = OrOpd1->getValueType(0);
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the known zero bits for the candidate of the first operand.
    // This allows us to catch more general cases than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proves it was useless.
    KnownBits Known;
    CurDAG->computeKnownBits(OrOpd1Val, Known);

    // Check if there is enough room for the second operand to appear
    // in the first one
    APInt BitsToBeInserted =
        APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~Known.Zero) != 0)
      continue;

    // Set the first operand
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits
      Dst = OrOpd1Val;

    // both parts match
    SDLoc DL(N);
    SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);
    return true;
  }

  // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
  // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
  // mask (e.g., 0x000ffff0).
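  // e.g. (illustrative, 32-bit): 'or (and X, 0xff00ffff), (and Y, 0x00ff0000)'
  // becomes 'LSR tmp, Y, #16' followed by 'BFM X, tmp, #16, #7' (a BFI/BFXIL
  // alias), i.e. bits 23:16 of Y are inserted into bits 23:16 of X.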
2338 uint64_t Mask0Imm, Mask1Imm;
2339 SDValue And0 = N->getOperand(0);
2340 SDValue And1 = N->getOperand(1);
2341 if (And0.hasOneUse() && And1.hasOneUse() &&
2342 isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
2343 isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
2344 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
2345 (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
2346
Chad Rosier02f25a92016-05-19 14:19:47 +00002347 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
2348 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
2349 // bits to be inserted.
2350 if (isShiftedMask(Mask0Imm, VT)) {
2351 std::swap(And0, And1);
2352 std::swap(Mask0Imm, Mask1Imm);
2353 }
2354
2355 SDValue Src = And1->getOperand(0);
2356 SDValue Dst = And0->getOperand(0);
2357 unsigned LSB = countTrailingZeros(Mask1Imm);
2358 int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
2359
2360 // The BFXIL inserts the low-order bits from a source register, so right
2361 // shift the needed bits into place.
2362 SDLoc DL(N);
2363 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2364 SDNode *LSR = CurDAG->getMachineNode(
2365 ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
2366 CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
2367
2368 // BFXIL is an alias of BFM, so translate to BFM operands.
2369 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2370 unsigned ImmS = Width - 1;
2371
2372 // Create the BFXIL instruction.
2373 SDValue Ops[] = {Dst, SDValue(LSR, 0),
2374 CurDAG->getTargetConstant(ImmR, DL, VT),
2375 CurDAG->getTargetConstant(ImmS, DL, VT)};
2376 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2377 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2378 return true;
2379 }
2380
Justin Bogner283e3bd2016-05-12 23:10:30 +00002381 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002382}
2383
Justin Bogner283e3bd2016-05-12 23:10:30 +00002384bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
Tim Northover3b0846e2014-05-24 12:50:23 +00002385 if (N->getOpcode() != ISD::OR)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002386 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002387
Weiming Zhao56ab5182015-12-01 19:17:49 +00002388 APInt NUsefulBits;
2389 getUsefulBits(SDValue(N, 0), NUsefulBits);
Tim Northover3b0846e2014-05-24 12:50:23 +00002390
Weiming Zhao56ab5182015-12-01 19:17:49 +00002391 // If all bits are not useful, just return UNDEF.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002392 if (!NUsefulBits) {
2393 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2394 return true;
2395 }
Weiming Zhao56ab5182015-12-01 19:17:49 +00002396
Chad Rosier816a67d2016-05-26 13:27:56 +00002397 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
2398 return true;
2399
2400 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
Tim Northover3b0846e2014-05-24 12:50:23 +00002401}
2402
Geoff Berry43ec15e2015-09-18 17:11:53 +00002403/// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2404/// equivalent of a left shift by a constant amount followed by an and masking
2405/// out a contiguous set of bits.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002406bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
Geoff Berry43ec15e2015-09-18 17:11:53 +00002407 if (N->getOpcode() != ISD::AND)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002408 return false;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002409
2410 EVT VT = N->getValueType(0);
Chad Rosier08d99082016-05-13 22:53:13 +00002411 if (VT != MVT::i32 && VT != MVT::i64)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002412 return false;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002413
2414 SDValue Op0;
2415 int DstLSB, Width;
2416 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2417 Op0, DstLSB, Width))
Justin Bogner283e3bd2016-05-12 23:10:30 +00002418 return false;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002419
2420 // ImmR is the rotate right amount.
2421 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2422 // ImmS is the most significant bit of the source to be moved.
2423 unsigned ImmS = Width - 1;
2424
2425 SDLoc DL(N);
2426 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2427 CurDAG->getTargetConstant(ImmS, DL, VT)};
Chad Rosier08d99082016-05-13 22:53:13 +00002428 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002429 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2430 return true;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002431}
2432
bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
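  //
  // For example (illustrative values): in (fp_to_sint (fmul Val, 16.0)) the
  // multiplier is 2^4, so FBits == 4 and the operand folds into a fixed-point
  // conversion such as FCVTZS Wd, Sn, #4.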
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
  return true;
}

// Inspects a register string of the form o0:op1:CRn:CRm:op2, extracts the
// integer values of its fields, and combines them into the single immediate
// value used in the MRS/MSR instruction.
static int getIntOperandFromRegisterString(StringRef RegString) {
  SmallVector<StringRef, 5> Fields;
  RegString.split(Fields, ':');

  if (Fields.size() == 1)
    return -1;

  assert(Fields.size() == 5 &&
         "Invalid number of fields in read register string");

  SmallVector<int, 5> Ops;
  bool AllIntFields = true;

  for (StringRef Field : Fields) {
    unsigned IntField;
    AllIntFields &= !Field.getAsInteger(10, IntField);
    Ops.push_back(IntField);
  }

  assert(AllIntFields &&
         "Unexpected non-integer value in special register string.");

  // Need to combine the integer fields of the string into a single value
  // based on the bit encoding of the MRS/MSR instructions.
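  //
  // Illustrative example (the register string is an assumption, chosen to
  // match the o0:op1:CRn:CRm:op2 form): "3:3:9:13:0" encodes as
  // (3 << 14) | (3 << 11) | (9 << 7) | (13 << 3) | 0 == 0xDCE8.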
  return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
         (Ops[3] << 3) | (Ops[4]);
}

// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
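//
// For example (illustrative IR, values assumed): with !0 = !{!"3:3:9:13:0"},
//   %v = call i64 @llvm.read_register.i64(metadata !0)
// selects to an MRS whose immediate operand is the value computed by
// getIntOperandFromRegisterString above.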
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  // Use the sysreg mapper to map the remaining possible strings to the
  // value for the register to be used for the instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Readable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
  else
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());

  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  return false;
}

// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(
        N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(2), N->getOperand(0)));
    return true;
  }

  // Check if the register was one of those allowed as the pstatefield value
  // in the MSR (immediate) instruction. To accept the values allowed in the
  // pstatefield for the MSR (immediate) instruction, we also require that an
  // immediate value has been provided as an argument; we know this is the
  // case as it has been ensured by semantic checking.
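  //
  // For example (illustrative; the field name is an assumption): with
  // !0 = !{!"pan"}, the call
  //   call void @llvm.write_register.i64(metadata !0, i64 1)
  // would take the MSRpstateImm1 path below with Immed == 1.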
  auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
  if (PMapper) {
    assert(isa<ConstantSDNode>(N->getOperand(2)) &&
           "Expected a constant integer expression.");
    unsigned Reg = PMapper->Encoding;
    uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned State;
    if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
      assert(Immed < 2 && "Bad imm");
      State = AArch64::MSRpstateImm1;
    } else {
      assert(Immed < 16 && "Bad imm");
      State = AArch64::MSRpstateImm4;
    }
    ReplaceNode(N, CurDAG->getMachineNode(
                       State, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       CurDAG->getTargetConstant(Immed, DL, MVT::i16),
                       N->getOperand(0)));
    return true;
  }

  // Use the sysreg mapper to attempt to map the remaining possible strings
  // to the value for the register to be used for the MSR (register)
  // instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Writeable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
  else
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MSR, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(2), N->getOperand(0)));
    return true;
  }

  return false;
}

/// We've got special pseudo-instructions for these atomic compare-and-swap
/// nodes.
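///
/// A sketch of the mapping (read off the code below; operand roles assumed):
/// an i32 ATOMIC_CMP_SWAP of (chain, ptr, cmp, new) becomes a CMP_SWAP_32
/// machine node producing (loaded value, i32 status, chain); the original
/// node's value and chain results are rewired to results 0 and 2.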
void AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
  unsigned Opcode;
  EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
  if (MemTy == MVT::i8)
    Opcode = AArch64::CMP_SWAP_8;
  else if (MemTy == MVT::i16)
    Opcode = AArch64::CMP_SWAP_16;
  else if (MemTy == MVT::i32)
    Opcode = AArch64::CMP_SWAP_32;
  else if (MemTy == MVT::i64)
    Opcode = AArch64::CMP_SWAP_64;
  else
    llvm_unreachable("Unknown AtomicCmpSwap type");

  MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
                   N->getOperand(0)};
  SDNode *CmpSwap = CurDAG->getMachineNode(
      Opcode, SDLoc(N),
      CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);

  ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // A few custom selection cases.
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ATOMIC_CMP_SWAP:
    SelectCMP_SWAP(Node);
    return;

  case ISD::READ_REGISTER:
    if (tryReadRegister(Node))
      return;
    break;

  case ISD::WRITE_REGISTER:
    if (tryWriteRegister(Node))
      return;
    break;

  case ISD::ADD:
    if (tryMLAV64LaneV128(Node))
      return;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    if (tryIndexedLoad(Node))
      return;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (tryBitfieldExtractOp(Node))
      return;
    if (tryBitfieldInsertInZeroOp(Node))
      return;
    break;

  case ISD::SIGN_EXTEND:
    if (tryBitfieldExtractOpFromSExt(Node))
      return;
    break;

  case ISD::OR:
    if (tryBitfieldInsertOp(Node))
      return;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
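    //
    // For example (illustrative): (extract_vector_elt v2f64:Vn, 0) becomes an
    // EXTRACT_SUBREG of the dsub subregister, i.e. an FMOV Dd, Dn style copy
    // rather than a lane-indexed extract.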
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      llvm_unreachable("Unexpected vector element type!");
    case 64:
      SubReg = AArch64::dsub;
      break;
    case 32:
      SubReg = AArch64::ssub;
      break;
    case 16:
      SubReg = AArch64::hsub;
      break;
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    ReplaceNode(Node, Extract.getNode());
    return;
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
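    //
    // For example (illustrative): (i64 0) becomes a CopyFromReg of XZR, so a
    // later instruction such as a store can read the zero register directly
    // instead of needing a separately materialized immediate.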
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
        ReplaceNode(Node, New.getNode());
        return;
      } else if (VT == MVT::i64) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
        ReplaceNode(Node, New.getNode());
        return;
      }
    }
    break;
  }

  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    SDLoc DL(Node);
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
    CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_ldaxp:
    case Intrinsic::aarch64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
      ReplaceNode(Node, Ld);
      return;
    }
    case Intrinsic::aarch64_stlxp:
    case Intrinsic::aarch64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      ReplaceNode(Node, St);
      return;
    }
    case Intrinsic::aarch64_neon_ld1x2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld1x3:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld1x4:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld3:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld4:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld2r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld3r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld4r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 2, AArch64::LD2i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectLoadLane(Node, 2, AArch64::LD2i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectLoadLane(Node, 2, AArch64::LD2i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectLoadLane(Node, 2, AArch64::LD2i64);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 3, AArch64::LD3i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectLoadLane(Node, 3, AArch64::LD3i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectLoadLane(Node, 3, AArch64::LD3i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectLoadLane(Node, 3, AArch64::LD3i64);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 4, AArch64::LD4i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectLoadLane(Node, 4, AArch64::LD4i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectLoadLane(Node, 4, AArch64::LD4i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectLoadLane(Node, 4, AArch64::LD4i64);
        return;
      }
      break;
    }
  } break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_tbl2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                           : AArch64::TBLv16i8Three,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                           : AArch64::TBLv16i8Four,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbx2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                           : AArch64::TBXv16i8Three,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                           : AArch64::TBXv16i8Four,
                  true);
      return;
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (tryMULLV64LaneV128(IntNo, Node))
        return;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST1Twov8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST1Twov16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST1Twov4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST1Twov8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST1Twov2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST1Twov4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST1Twov2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST1Threev8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST1Threev16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST1Threev4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST1Threev8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST1Threev2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST1Threev4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST1Threev2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST2Twov8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST2Twov16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST2Twov4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST2Twov8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST2Twov2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST2Twov4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST2Twov2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST3Threev8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST3Threev16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST3Threev4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST3Threev8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST3Threev2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST3Threev4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST3Threev2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST4Fourv2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 2, AArch64::ST2i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 2, AArch64::ST2i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 2, AArch64::ST2i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 2, AArch64::ST2i64);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 3, AArch64::ST3i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 3, AArch64::ST3i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 3, AArch64::ST3i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 3, AArch64::ST3i64);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 4, AArch64::ST4i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 4, AArch64::ST4i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 4, AArch64::ST4i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 4, AArch64::ST4i64);
        return;
      }
      break;
    }
    }
    break;
  }
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
3471 SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003472 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003473 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
3474 SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003475 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003476 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3477 SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003478 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003479 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3480 SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003481 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003482 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3483 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003484 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003485 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3486 SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
Quentin Colombet35a47012017-04-01 01:26:17 +00003487 return;
Justin Bogner283e3bd2016-05-12 23:10:30 +00003488 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003489 break;
3490 }
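  // LD1x{2,3,4}post load two to four consecutive registers without the lane
  // de-interleaving performed by LD2/LD3/LD4, so every element type maps to
  // an LD1 multi-register opcode.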
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
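  // LDnR (load-and-replicate) post-increment forms: each loaded element is
  // broadcast to every lane of its destination register.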
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
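  // Post-increment single-lane loads. As with the lane-store intrinsics
  // above, only the element width matters, so the q- and d-register forms of
  // each element type share one opcode.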
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
      return;
    }
    break;
  }
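  // Post-incrementing structured stores. Unlike the loads above, the vector
  // type must be taken from the first data operand (operand 1): the store
  // node itself only produces the updated base address and a chain.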
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    }
    break;
  }
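  // ST1x{2,3,4}post: consecutive multi-register stores, the mirror image of
  // the LD1x{2,3,4}post cases above.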
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
      return;
    }
    break;
  }
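  // Post-increment single-lane stores, again dispatched on element width
  // alone.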
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
      return;
    }
    break;
  }
  }

  // Select the default instruction
  SelectCode(Node);
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
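
// Usage sketch (an illustration, not part of this file): the AArch64 target
// would typically register this pass from its pass configuration when setting
// up instruction selection, along the lines of:
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     return false;
//   }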