//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
        ForCodeSize(false) {}

  StringRef getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction().optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }


  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  bool SelectCMP_SWAP(SDNode *N);

};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the immediate's value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // We need to make sure that this one operand does not end up in XZR, thus
    // require the address to be in a PointerRegClass register.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
    SDLoc dl(Op);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
    SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);
    OutOps.push_back(NewOp);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

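  // For example, 0xabc fits directly (shift 0) and 0xabc000 is encoded as
  // 0xabc with LSL #12, but a value such as 0xabc123 fits neither form and
  // is rejected.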
  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

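  // Negate using two's complement in the operand's own width, then let
  // SelectArithImmed check that the result still fits its 24-bit budget.
  // E.g. (cmp w0, #-5) can then be selected as "cmn w0, #5".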
  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding a logical shift of up to three places.
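  // e.g. the shift in (add x, (shl y, #3)) can become the scaled index of
  // "ldr x0, [x, y, lsl #3]" at no extra cost.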
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->uses())
    if (!isa<MemSDNode>(*UI))
      for (SDNode *UII : UI->uses())
        if (!isa<MemSDNode>(*UII))
          return false;
  return true;
}

/// \brief Determine whether it is worth it to fold V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (ForCodeSize || V.hasOneUse())
    return true;
  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the addressing mode and save a cycle.
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
      isWorthFoldingSHL(V))
    return true;
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
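/// For example, (or a, (srl b, 11)) can select to "orr Xd, Xn, Xm, LSR #11",
/// folding the shift into the logical instruction.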
392bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
393 SDValue &Reg, SDValue &Shift) {
394 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
395 if (ShType == AArch64_AM::InvalidShiftExtend)
396 return false;
397 if (!AllowROR && ShType == AArch64_AM::ROR)
398 return false;
399
400 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
Sanjay Patelb1f0a0f2016-09-14 16:05:51 +0000401 unsigned BitSize = N.getValueSizeInBits();
Tim Northover3b0846e2014-05-24 12:50:23 +0000402 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
403 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
404
405 Reg = N.getOperand(0);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +0000406 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
Tim Northover3b0846e2014-05-24 12:50:23 +0000407 return isWorthFolding(N);
408 }
409
410 return false;
411}
412
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

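    // An AND with one of the masks below is an implicit zero-extension of the
    // low byte/halfword/word, so it maps onto the corresponding UXT* operand.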
    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
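  // Under the pattern just matched, the lane in the original wide vector is
  // the DUPLANE index plus the offset at which the subvector was extracted.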
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
// high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
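/// For example, a multiply-accumulate whose second multiplicand is lane 3 of
/// a v4i32 vector can become a single "mla v0.4s, v1.4s, v2.s[3]".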
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}

bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}


/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
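/// For example, (add x, (shl (sext_inreg y, i16), 2)) can become
/// "add x0, x1, w2, sxth #2", folding both the extend and the shift.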
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if free 32-bit -> 64-bit zext can be used instead.
    if (Ext == AArch64_AM::UXTW &&
        Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // variant selected here doesn't support labels/immediates, only base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
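      // A signed 7-bit scaled field: the offset must be a multiple of Size in
      // [-64 * Size, 63 * Size], as used by the load/store pair instructions.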
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getValueType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
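      // An unsigned 12-bit scaled field: the offset must be a multiple of
      // Size in [0, 4095 * Size].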
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
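/// For example, an offset of #-17 on an i64 load is neither positive nor a
/// multiple of 8, so the scaled 12-bit form cannot encode it; it selects the
/// unscaled "ldur x0, [x1, #-17]" instead.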
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}

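// WRO ("W register offset") matches a base plus a 32-bit index register that
// is implicitly sign- or zero-extended, e.g. "ldr x0, [x1, w2, sxtw #3]".
// The XRO form below uses a full 64-bit index register instead.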
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
944 // As a single MOVZ is faster than a "ADD of LSL #12", ignore such constant.
945 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
946 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
947 return false;
948}
949

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. In that case a [BaseReg + 0] address would be used and code like
  // this generated:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // In such situations, the [BaseReg, XReg] addressing mode saves the
  // ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode, and also immediates that can be encoded by a single ADD (SUB is
    // also checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}

bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
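  // Pre-index writes back base+offset and loads from the updated address,
  // e.g. "ldr x0, [x1, #8]!"; post-index loads from the original address and
  // then writes back, e.g. "ldr x0, [x1], #8".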

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return false;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}

void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

Justin Bogner283e3bd2016-05-12 23:10:30 +00001268void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1269 unsigned Opc) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001270 SDLoc dl(N);
1271 EVT VT = N->getOperand(2)->getValueType(0);
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001272 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1273 MVT::Other}; // Type for the Chain
Tim Northover3b0846e2014-05-24 12:50:23 +00001274
1275 // Form a REG_SEQUENCE to force register allocation.
1276 bool Is128Bit = VT.getSizeInBits() == 128;
1277 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1278 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1279
Benjamin Kramerea68a942015-02-19 15:26:17 +00001280 SDValue Ops[] = {RegSeq,
1281 N->getOperand(NumVecs + 1), // base register
1282 N->getOperand(NumVecs + 2), // Incremental
1283 N->getOperand(0)}; // Chain
Tim Northover3b0846e2014-05-24 12:50:23 +00001284 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1285
Justin Bogner283e3bd2016-05-12 23:10:30 +00001286 ReplaceNode(N, St);
Tim Northover3b0846e2014-05-24 12:50:23 +00001287}
1288
Benjamin Kramer51f6096c2015-03-23 12:30:58 +00001289namespace {
Tim Northover3b0846e2014-05-24 12:50:23 +00001290/// WidenVector - Given a value in the V64 register class, produce the
1291/// equivalent value in the V128 register class.
1292class WidenVector {
1293 SelectionDAG &DAG;
1294
1295public:
1296 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1297
1298 SDValue operator()(SDValue V64Reg) {
1299 EVT VT = V64Reg.getValueType();
1300 unsigned NarrowSize = VT.getVectorNumElements();
1301 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1302 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1303 SDLoc DL(V64Reg);
1304
1305 SDValue Undef =
1306 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1307 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1308 }
1309};
Benjamin Kramer51f6096c2015-03-23 12:30:58 +00001310} // namespace
Tim Northover3b0846e2014-05-24 12:50:23 +00001311
1312/// NarrowVector - Given a value in the V128 register class, produce the
1313/// equivalent value in the V64 register class.
1314static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1315 EVT VT = V128Reg.getValueType();
1316 unsigned WideSize = VT.getVectorNumElements();
1317 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1318 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1319
1320 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1321 V128Reg);
1322}
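// Illustrative sketch (hypothetical values, not from the original source):
// WidenVector turns a v2f32 value held in a D register into a v4f32 value by
// inserting it into the dsub subregister of an IMPLICIT_DEF Q register:
//   (v4f32 (INSERT_SUBREG (IMPLICIT_DEF), %in:v2f32, dsub))
// NarrowVector is the inverse, extracting dsub to recover the v2f32:
//   (v2f32 (EXTRACT_SUBREG %in:v4f32, dsub))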
1323
Justin Bogner283e3bd2016-05-12 23:10:30 +00001324void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1325 unsigned Opc) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001326 SDLoc dl(N);
1327 EVT VT = N->getValueType(0);
1328 bool Narrow = VT.getSizeInBits() == 64;
1329
1330 // Form a REG_SEQUENCE to force register allocation.
1331 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1332
1333 if (Narrow)
David Majnemer2d006e72016-08-12 04:32:42 +00001334 transform(Regs, Regs.begin(),
Tim Northover3b0846e2014-05-24 12:50:23 +00001335 WidenVector(*CurDAG));
1336
1337 SDValue RegSeq = createQTuple(Regs);
1338
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001339 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001340
1341 unsigned LaneNo =
1342 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1343
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001344 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
Benjamin Kramerea68a942015-02-19 15:26:17 +00001345 N->getOperand(NumVecs + 3), N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001346 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1347 SDValue SuperReg = SDValue(Ld, 0);
1348
1349 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
Craig Topper26260942015-10-18 05:15:34 +00001350 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1351 AArch64::qsub2, AArch64::qsub3 };
Tim Northover3b0846e2014-05-24 12:50:23 +00001352 for (unsigned i = 0; i < NumVecs; ++i) {
1353 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1354 if (Narrow)
1355 NV = NarrowVector(NV, *CurDAG);
1356 ReplaceUses(SDValue(N, i), NV);
1357 }
1358
1359 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
Justin Bogner3525da72016-05-12 20:54:27 +00001360 CurDAG->RemoveDeadNode(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00001361}
1362
Justin Bogner283e3bd2016-05-12 23:10:30 +00001363void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1364 unsigned Opc) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001365 SDLoc dl(N);
1366 EVT VT = N->getValueType(0);
1367 bool Narrow = VT.getSizeInBits() == 64;
1368
1369 // Form a REG_SEQUENCE to force register allocation.
1370 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1371
1372 if (Narrow)
David Majnemer2d006e72016-08-12 04:32:42 +00001373 transform(Regs, Regs.begin(),
Tim Northover3b0846e2014-05-24 12:50:23 +00001374 WidenVector(*CurDAG));
1375
1376 SDValue RegSeq = createQTuple(Regs);
1377
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001378 const EVT ResTys[] = {MVT::i64, // Type of the write back register
Ahmed Bougachae14a4d42015-04-17 23:43:33 +00001379 RegSeq->getValueType(0), MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001380
1381 unsigned LaneNo =
1382 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1383
Benjamin Kramerea68a942015-02-19 15:26:17 +00001384 SDValue Ops[] = {RegSeq,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001385 CurDAG->getTargetConstant(LaneNo, dl,
1386 MVT::i64), // Lane Number
Benjamin Kramerea68a942015-02-19 15:26:17 +00001387 N->getOperand(NumVecs + 2), // Base register
1388 N->getOperand(NumVecs + 3), // Incremental
1389 N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001390 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1391
1392 // Update uses of the write back register
1393 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1394
1395 // Update uses of the vector list
1396 SDValue SuperReg = SDValue(Ld, 1);
1397 if (NumVecs == 1) {
1398 ReplaceUses(SDValue(N, 0),
1399 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1400 } else {
1401 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
Craig Topper26260942015-10-18 05:15:34 +00001402 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1403 AArch64::qsub2, AArch64::qsub3 };
Tim Northover3b0846e2014-05-24 12:50:23 +00001404 for (unsigned i = 0; i < NumVecs; ++i) {
1405 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1406 SuperReg);
1407 if (Narrow)
1408 NV = NarrowVector(NV, *CurDAG);
1409 ReplaceUses(SDValue(N, i), NV);
1410 }
1411 }
1412
1413 // Update the Chain
1414 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
Justin Bogner3525da72016-05-12 20:54:27 +00001415 CurDAG->RemoveDeadNode(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00001416}
1417
Justin Bogner283e3bd2016-05-12 23:10:30 +00001418void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1419 unsigned Opc) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001420 SDLoc dl(N);
1421 EVT VT = N->getOperand(2)->getValueType(0);
1422 bool Narrow = VT.getSizeInBits() == 64;
1423
1424 // Form a REG_SEQUENCE to force register allocation.
1425 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1426
1427 if (Narrow)
David Majnemer2d006e72016-08-12 04:32:42 +00001428 transform(Regs, Regs.begin(),
Tim Northover3b0846e2014-05-24 12:50:23 +00001429 WidenVector(*CurDAG));
1430
1431 SDValue RegSeq = createQTuple(Regs);
1432
1433 unsigned LaneNo =
1434 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1435
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001436 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
Benjamin Kramerea68a942015-02-19 15:26:17 +00001437 N->getOperand(NumVecs + 3), N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001438 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1439
1440 // Transfer memoperands.
1441 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1442 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1443 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1444
Justin Bogner283e3bd2016-05-12 23:10:30 +00001445 ReplaceNode(N, St);
Tim Northover3b0846e2014-05-24 12:50:23 +00001446}
1447
Justin Bogner283e3bd2016-05-12 23:10:30 +00001448void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1449 unsigned Opc) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001450 SDLoc dl(N);
1451 EVT VT = N->getOperand(2)->getValueType(0);
1452 bool Narrow = VT.getSizeInBits() == 64;
1453
1454 // Form a REG_SEQUENCE to force register allocation.
1455 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1456
1457 if (Narrow)
David Majnemer2d006e72016-08-12 04:32:42 +00001458 transform(Regs, Regs.begin(),
Tim Northover3b0846e2014-05-24 12:50:23 +00001459 WidenVector(*CurDAG));
1460
1461 SDValue RegSeq = createQTuple(Regs);
1462
Benjamin Kramer867bfc52015-03-07 17:41:00 +00001463 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1464 MVT::Other};
Tim Northover3b0846e2014-05-24 12:50:23 +00001465
1466 unsigned LaneNo =
1467 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1468
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001469 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
Benjamin Kramerea68a942015-02-19 15:26:17 +00001470 N->getOperand(NumVecs + 2), // Base Register
1471 N->getOperand(NumVecs + 3), // Incremental
1472 N->getOperand(0)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001473 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1474
1475 // Transfer memoperands.
1476 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1477 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1478 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1479
Justin Bogner283e3bd2016-05-12 23:10:30 +00001480 ReplaceNode(N, St);
Tim Northover3b0846e2014-05-24 12:50:23 +00001481}
1482
1483static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1484 unsigned &Opc, SDValue &Opd0,
1485 unsigned &LSB, unsigned &MSB,
1486 unsigned NumberOfIgnoredLowBits,
1487 bool BiggerPattern) {
1488 assert(N->getOpcode() == ISD::AND &&
1489 "N must be a AND operation to call this function");
1490
1491 EVT VT = N->getValueType(0);
1492
1493 // Here we could test the type of VT and return false when the type does not
1494 // match, but since that check is done prior to this call in the current
1495 // context, we turn it into an assert to avoid redundant code.
1496 assert((VT == MVT::i32 || VT == MVT::i64) &&
1497 "Type checking must have been done before calling this function");
1498
1499 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1500 // changed the AND node to a 32-bit mask operation. We'll have to
1501 // undo that as part of the transform here if we want to catch all
1502 // the opportunities.
1503 // Currently the NumberOfIgnoredLowBits argument helps to recover
1504 // from these situations when matching a bigger pattern (bitfield insert).
1505
1506 // For unsigned extracts, check for a shift right and mask
Chad Rosier7e8dd512016-05-14 18:56:28 +00001507 uint64_t AndImm = 0;
1508 if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
Tim Northover3b0846e2014-05-24 12:50:23 +00001509 return false;
1510
1511 const SDNode *Op0 = N->getOperand(0).getNode();
1512
1513 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1514 // simplified. Try to undo that
Chad Rosier7e8dd512016-05-14 18:56:28 +00001515 AndImm |= (1 << NumberOfIgnoredLowBits) - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001516
1517 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
Chad Rosier7e8dd512016-05-14 18:56:28 +00001518 if (AndImm & (AndImm + 1))
Tim Northover3b0846e2014-05-24 12:50:23 +00001519 return false;
1520
1521 bool ClampMSB = false;
Chad Rosier7e8dd512016-05-14 18:56:28 +00001522 uint64_t SrlImm = 0;
Tim Northover3b0846e2014-05-24 12:50:23 +00001523 // Handle the SRL + ANY_EXTEND case.
1524 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
Chad Rosier7e8dd512016-05-14 18:56:28 +00001525 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001526 // Extend the incoming operand of the SRL to 64-bit.
1527 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1528 // Make sure to clamp the MSB so that we preserve the semantics of the
1529 // original operations.
1530 ClampMSB = true;
1531 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1532 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
Chad Rosier7e8dd512016-05-14 18:56:28 +00001533 SrlImm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001534 // If the shift result was truncated, we can still combine them.
1535 Opd0 = Op0->getOperand(0).getOperand(0);
1536
1537 // Use the type of SRL node.
1538 VT = Opd0->getValueType(0);
Chad Rosier7e8dd512016-05-14 18:56:28 +00001539 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001540 Opd0 = Op0->getOperand(0);
1541 } else if (BiggerPattern) {
1542 // Let's pretend a 0 shift right has been performed.
1543 // The resulting code will be at least as good as the original one
1544 // plus it may expose more opportunities for bitfield insert pattern.
1545 // FIXME: Currently we limit this to the bigger pattern, because
Chad Rosier6c1f0932015-09-17 13:10:27 +00001546 // some optimizations expect AND and not UBFM.
Tim Northover3b0846e2014-05-24 12:50:23 +00001547 Opd0 = N->getOperand(0);
1548 } else
1549 return false;
1550
Matthias Braun75260352015-02-24 18:52:04 +00001551 // Bail out on large immediates. This happens when no proper
1552 // combining/constant folding was performed.
Chad Rosier7e8dd512016-05-14 18:56:28 +00001553 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
Matthias Braun02892ec2015-02-25 18:03:50 +00001554 DEBUG((dbgs() << N
1555 << ": Found large shift immediate, this should not happen\n"));
Matthias Braun75260352015-02-24 18:52:04 +00001556 return false;
Matthias Braun02892ec2015-02-25 18:03:50 +00001557 }
Tim Northover3b0846e2014-05-24 12:50:23 +00001558
Chad Rosier7e8dd512016-05-14 18:56:28 +00001559 LSB = SrlImm;
1560 MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
1561 : countTrailingOnes<uint64_t>(AndImm)) -
Tim Northover3b0846e2014-05-24 12:50:23 +00001562 1;
1563 if (ClampMSB)
1564 // Since we're moving the extend before the right shift operation, we need
1565 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1566 // the zeros which would get shifted in with the original right shift
1567 // operation.
1568 MSB = MSB > 31 ? 31 : MSB;
1569
1570 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1571 return true;
1572}
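// Worked example (illustrative, not from the original source): for
//   (i32 (and (srl %x, 4), 0xff))
// AndImm = 0xff is a mask of the low bits (0xff & 0x100 == 0) and SrlImm = 4,
// so LSB = 4 and MSB = 4 + countTrailingOnes(0xff) - 1 = 11, selecting
//   UBFMWri %x, 4, 11   // i.e. UBFX %x, #4, #8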
1573
Chad Rosier2d658702016-06-03 15:00:09 +00001574static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
1575 SDValue &Opd0, unsigned &Immr,
1576 unsigned &Imms) {
1577 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
1578
1579 EVT VT = N->getValueType(0);
1580 unsigned BitWidth = VT.getSizeInBits();
1581 assert((VT == MVT::i32 || VT == MVT::i64) &&
1582 "Type checking must have been done before calling this function");
1583
1584 SDValue Op = N->getOperand(0);
1585 if (Op->getOpcode() == ISD::TRUNCATE) {
1586 Op = Op->getOperand(0);
1587 VT = Op->getValueType(0);
1588 BitWidth = VT.getSizeInBits();
1589 }
1590
1591 uint64_t ShiftImm;
1592 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
1593 !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
1594 return false;
1595
1596 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
1597 if (ShiftImm + Width > BitWidth)
1598 return false;
1599
1600 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
1601 Opd0 = Op.getOperand(0);
1602 Immr = ShiftImm;
1603 Imms = ShiftImm + Width - 1;
1604 return true;
1605}
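// Worked example (illustrative, not from the original source): for
//   (i32 (sign_extend_inreg (srl %x, 3), i8))
// ShiftImm = 3 and Width = 8, so Immr = 3 and Imms = 10, selecting
//   SBFMWri %x, 3, 10   // i.e. SBFX %x, #3, #8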
1606
David Xu052b9d92014-09-02 09:33:56 +00001607static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1608 SDValue &Opd0, unsigned &LSB,
1609 unsigned &MSB) {
1610 // We are looking for the following pattern which basically extracts several
1611 // contiguous bits from the source value and places them at the LSB of the
1612 // destination value; all other bits of the destination value are set to zero:
Tim Northover3b0846e2014-05-24 12:50:23 +00001613 //
1614 // Value2 = AND Value, MaskImm
1615 // SRL Value2, ShiftImm
1616 //
David Xu052b9d92014-09-02 09:33:56 +00001617 // with MaskImm >> ShiftImm to search for the bit width.
Tim Northover3b0846e2014-05-24 12:50:23 +00001618 //
1619 // This gets selected into a single UBFM:
1620 //
Chad Rosier7e8dd512016-05-14 18:56:28 +00001621 // UBFM Value, ShiftImm, BitWide + SrlImm -1
Tim Northover3b0846e2014-05-24 12:50:23 +00001622 //
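// For example (hypothetical values): with MaskImm = 0xff0 and ShiftImm = 4,
// MaskImm >> ShiftImm = 0xff, so BitWide = 8 and the pair is selected as
// UBFM Value, 4, 11 (i.e. UBFX Value, #4, #8).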
1623
1624 if (N->getOpcode() != ISD::SRL)
1625 return false;
1626
Chad Rosier7e8dd512016-05-14 18:56:28 +00001627 uint64_t AndMask = 0;
1628 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
Tim Northover3b0846e2014-05-24 12:50:23 +00001629 return false;
1630
1631 Opd0 = N->getOperand(0).getOperand(0);
1632
Chad Rosier7e8dd512016-05-14 18:56:28 +00001633 uint64_t SrlImm = 0;
1634 if (!isIntImmediate(N->getOperand(1), SrlImm))
Tim Northover3b0846e2014-05-24 12:50:23 +00001635 return false;
1636
David Xu052b9d92014-09-02 09:33:56 +00001637 // Check whether we really have a several-bits extract here.
Chad Rosier7e8dd512016-05-14 18:56:28 +00001638 unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
1639 if (BitWide && isMask_64(AndMask >> SrlImm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001640 if (N->getValueType(0) == MVT::i32)
1641 Opc = AArch64::UBFMWri;
1642 else
1643 Opc = AArch64::UBFMXri;
1644
Chad Rosier7e8dd512016-05-14 18:56:28 +00001645 LSB = SrlImm;
1646 MSB = BitWide + SrlImm - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001647 return true;
1648 }
1649
1650 return false;
1651}
1652
1653static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001654 unsigned &Immr, unsigned &Imms,
Tim Northover3b0846e2014-05-24 12:50:23 +00001655 bool BiggerPattern) {
1656 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1657 "N must be a SHR/SRA operation to call this function");
1658
1659 EVT VT = N->getValueType(0);
1660
1661 // Here we could test the type of VT and return false when the type does not
1662 // match, but since that check is done prior to this call in the current
1663 // context, we turn it into an assert to avoid redundant code.
1664 assert((VT == MVT::i32 || VT == MVT::i64) &&
1665 "Type checking must have been done before calling this function");
1666
David Xu052b9d92014-09-02 09:33:56 +00001667 // Check for AND + SRL doing a several-bits extract.
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001668 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
Tim Northover3b0846e2014-05-24 12:50:23 +00001669 return true;
1670
Chad Rosierc73d5592016-05-16 12:55:01 +00001671 // We're looking for a shift of a shift.
Chad Rosier7e8dd512016-05-14 18:56:28 +00001672 uint64_t ShlImm = 0;
1673 uint64_t TruncBits = 0;
1674 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00001675 Opd0 = N->getOperand(0).getOperand(0);
1676 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1677 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1678 // We are looking for a shift of truncate. Truncate from i64 to i32 could
1679 // be considered as setting high 32 bits as zero. Our strategy here is to
1680 // always generate 64bit UBFM. This consistency will help the CSE pass
1681 // later find more redundancy.
1682 Opd0 = N->getOperand(0).getOperand(0);
Chad Rosier7e8dd512016-05-14 18:56:28 +00001683 TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
Tim Northover3b0846e2014-05-24 12:50:23 +00001684 VT = Opd0->getValueType(0);
1685 assert(VT == MVT::i64 && "the promoted type should be i64");
1686 } else if (BiggerPattern) {
1687 // Let's pretend a 0 shift left has been performed.
1688 // FIXME: Currently we limit this to the bigger pattern case,
1689 // because some optimizations expect AND and not UBFM
1690 Opd0 = N->getOperand(0);
1691 } else
1692 return false;
1693
Matthias Braun75260352015-02-24 18:52:04 +00001694 // Missing combines/constant folding may have left us with strange
1695 // constants.
Chad Rosier7e8dd512016-05-14 18:56:28 +00001696 if (ShlImm >= VT.getSizeInBits()) {
Matthias Braun02892ec2015-02-25 18:03:50 +00001697 DEBUG((dbgs() << N
1698 << ": Found large shift immediate, this should not happen\n"));
Matthias Braun75260352015-02-24 18:52:04 +00001699 return false;
Matthias Braun02892ec2015-02-25 18:03:50 +00001700 }
Matthias Braun75260352015-02-24 18:52:04 +00001701
Chad Rosier7e8dd512016-05-14 18:56:28 +00001702 uint64_t SrlImm = 0;
1703 if (!isIntImmediate(N->getOperand(1), SrlImm))
Tim Northover3b0846e2014-05-24 12:50:23 +00001704 return false;
1705
Chad Rosier7e8dd512016-05-14 18:56:28 +00001706 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00001707 "bad amount in shift node!");
Chad Rosier7e8dd512016-05-14 18:56:28 +00001708 int immr = SrlImm - ShlImm;
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001709 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
Chad Rosier7e8dd512016-05-14 18:56:28 +00001710 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001711 // SRA requires a signed extraction
1712 if (VT == MVT::i32)
1713 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1714 else
1715 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1716 return true;
1717}
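// Worked example (illustrative, not from the original source): for
//   (i32 (srl (shl %x, 24), 28))
// ShlImm = 24 and SrlImm = 28, so Immr = 28 - 24 = 4 and
// Imms = 32 - 24 - 1 = 7, selecting UBFMWri %x, 4, 7 (UBFX %x, #4, #4).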
1718
Chad Rosierbe879ea2016-06-03 20:05:49 +00001719bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
1720 assert(N->getOpcode() == ISD::SIGN_EXTEND);
1721
1722 EVT VT = N->getValueType(0);
1723 EVT NarrowVT = N->getOperand(0)->getValueType(0);
1724 if (VT != MVT::i64 || NarrowVT != MVT::i32)
1725 return false;
1726
1727 uint64_t ShiftImm;
1728 SDValue Op = N->getOperand(0);
1729 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
1730 return false;
1731
1732 SDLoc dl(N);
1733 // Extend the incoming operand of the shift to 64-bits.
1734 SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
1735 unsigned Immr = ShiftImm;
1736 unsigned Imms = NarrowVT.getSizeInBits() - 1;
1737 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1738 CurDAG->getTargetConstant(Imms, dl, VT)};
1739 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
1740 return true;
1741}
1742
Tim Northover3b0846e2014-05-24 12:50:23 +00001743static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001744 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
Tim Northover3b0846e2014-05-24 12:50:23 +00001745 unsigned NumberOfIgnoredLowBits = 0,
1746 bool BiggerPattern = false) {
1747 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1748 return false;
1749
1750 switch (N->getOpcode()) {
1751 default:
1752 if (!N->isMachineOpcode())
1753 return false;
1754 break;
1755 case ISD::AND:
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001756 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
Tim Northover3b0846e2014-05-24 12:50:23 +00001757 NumberOfIgnoredLowBits, BiggerPattern);
1758 case ISD::SRL:
1759 case ISD::SRA:
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001760 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
Chad Rosier2d658702016-06-03 15:00:09 +00001761
1762 case ISD::SIGN_EXTEND_INREG:
1763 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
Tim Northover3b0846e2014-05-24 12:50:23 +00001764 }
1765
1766 unsigned NOpc = N->getMachineOpcode();
1767 switch (NOpc) {
1768 default:
1769 return false;
1770 case AArch64::SBFMWri:
1771 case AArch64::UBFMWri:
1772 case AArch64::SBFMXri:
1773 case AArch64::UBFMXri:
1774 Opc = NOpc;
1775 Opd0 = N->getOperand(0);
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001776 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1777 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
Tim Northover3b0846e2014-05-24 12:50:23 +00001778 return true;
1779 }
1780 // Unreachable
1781 return false;
1782}
1783
Justin Bogner283e3bd2016-05-12 23:10:30 +00001784bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001785 unsigned Opc, Immr, Imms;
Tim Northover3b0846e2014-05-24 12:50:23 +00001786 SDValue Opd0;
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001787 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
Justin Bogner283e3bd2016-05-12 23:10:30 +00001788 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00001789
1790 EVT VT = N->getValueType(0);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001791 SDLoc dl(N);
Tim Northover3b0846e2014-05-24 12:50:23 +00001792
1793 // If the bit extract operation is 64bit but the original type is 32bit, we
1794 // need to add one EXTRACT_SUBREG.
1795 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001796 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
1797 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
Tim Northover3b0846e2014-05-24 12:50:23 +00001798
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001799 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1800 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
Justin Bogner283e3bd2016-05-12 23:10:30 +00001801 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
1802 MVT::i32, SDValue(BFM, 0), SubReg));
1803 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001804 }
1805
Arnaud A. de Grandmaisonf40f99e2015-07-09 14:33:38 +00001806 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1807 CurDAG->getTargetConstant(Imms, dl, VT)};
Justin Bogner283e3bd2016-05-12 23:10:30 +00001808 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1809 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00001810}
1811
1812/// Does DstMask form a complementary pair with the mask provided by
1813/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
1814/// this asks whether DstMask zeroes precisely those bits that will be set by
1815/// the other half.
Benjamin Kramerc321e532016-06-08 19:09:22 +00001816static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
Tim Northover3b0846e2014-05-24 12:50:23 +00001817 unsigned NumberOfIgnoredHighBits, EVT VT) {
1818 assert((VT == MVT::i32 || VT == MVT::i64) &&
1819 "i32 or i64 mask type expected!");
1820 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1821
1822 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1823 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1824
1825 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1826 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1827}
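// For instance (hypothetical values): with VT == i32, DstMask = 0xffff00ff
// and BitsToBeInserted = 0x0000ff00, the two masks are disjoint and their
// union is all ones, so they form a complementary pair suitable for BFI.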
1828
1829// Look for bits that will be useful for later uses.
1830 // A bit is considered useless as soon as it is dropped and never used
1831 // before it has been dropped.
1832// E.g., looking for useful bit of x
1833// 1. y = x & 0x7
1834// 2. z = y >> 2
1835 // After #1, the useful bits of x are 0x7; these useful bits of x live
1836 // through y.
1837// After #2, the useful bits of x are 0x4.
1838 // However, if x is used by an unpredictable instruction, then all its bits
1839// are useful.
1840// E.g.
1841// 1. y = x & 0x7
1842// 2. z = y >> 2
1843// 3. str x, [@x]
1844static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1845
1846static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1847 unsigned Depth) {
1848 uint64_t Imm =
1849 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1850 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1851 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1852 getUsefulBits(Op, UsefulBits, Depth + 1);
1853}
1854
1855static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1856 uint64_t Imm, uint64_t MSB,
1857 unsigned Depth) {
1858 // inherit the bitwidth value
1859 APInt OpUsefulBits(UsefulBits);
1860 OpUsefulBits = 1;
1861
1862 if (MSB >= Imm) {
Craig Topper24e71012017-04-28 03:36:24 +00001863 OpUsefulBits <<= MSB - Imm + 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001864 --OpUsefulBits;
1865 // The interesting part will be in the lower part of the result
1866 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1867 // The interesting part was starting at Imm in the argument
Craig Topper24e71012017-04-28 03:36:24 +00001868 OpUsefulBits <<= Imm;
Tim Northover3b0846e2014-05-24 12:50:23 +00001869 } else {
Craig Topper24e71012017-04-28 03:36:24 +00001870 OpUsefulBits <<= MSB + 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00001871 --OpUsefulBits;
1872 // The interesting part will be shifted in the result
Craig Topper24e71012017-04-28 03:36:24 +00001873 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
Tim Northover3b0846e2014-05-24 12:50:23 +00001874 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1875 // The interesting part was at zero in the argument
Craig Topperfc947bc2017-04-18 17:14:21 +00001876 OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
Tim Northover3b0846e2014-05-24 12:50:23 +00001877 }
1878
1879 UsefulBits &= OpUsefulBits;
1880}
1881
1882static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1883 unsigned Depth) {
1884 uint64_t Imm =
1885 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1886 uint64_t MSB =
1887 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1888
1889 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1890}
1891
1892static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1893 unsigned Depth) {
1894 uint64_t ShiftTypeAndValue =
1895 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1896 APInt Mask(UsefulBits);
1897 Mask.clearAllBits();
1898 Mask.flipAllBits();
1899
1900 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1901 // Shift Left
1902 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
Craig Topper24e71012017-04-28 03:36:24 +00001903 Mask <<= ShiftAmt;
Tim Northover3b0846e2014-05-24 12:50:23 +00001904 getUsefulBits(Op, Mask, Depth + 1);
Craig Topperfc947bc2017-04-18 17:14:21 +00001905 Mask.lshrInPlace(ShiftAmt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001906 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1907 // Shift Right
1908 // We do not handle AArch64_AM::ASR, because the sign will change the
1909 // number of useful bits
1910 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
Craig Topperfc947bc2017-04-18 17:14:21 +00001911 Mask.lshrInPlace(ShiftAmt);
Tim Northover3b0846e2014-05-24 12:50:23 +00001912 getUsefulBits(Op, Mask, Depth + 1);
Craig Topper24e71012017-04-28 03:36:24 +00001913 Mask <<= ShiftAmt;
Tim Northover3b0846e2014-05-24 12:50:23 +00001914 } else
1915 return;
1916
1917 UsefulBits &= Mask;
1918}
1919
1920static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1921 unsigned Depth) {
1922 uint64_t Imm =
1923 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1924 uint64_t MSB =
1925 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1926
Tim Northover3b0846e2014-05-24 12:50:23 +00001927 APInt OpUsefulBits(UsefulBits);
1928 OpUsefulBits = 1;
1929
Silviu Barangaaab65b12016-11-30 17:04:22 +00001930 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
1931 ResultUsefulBits.flipAllBits();
1932 APInt Mask(UsefulBits.getBitWidth(), 0);
1933
1934 getUsefulBits(Op, ResultUsefulBits, Depth + 1);
1935
Tim Northover3b0846e2014-05-24 12:50:23 +00001936 if (MSB >= Imm) {
Silviu Barangaaab65b12016-11-30 17:04:22 +00001937 // The instruction is a BFXIL.
1938 uint64_t Width = MSB - Imm + 1;
1939 uint64_t LSB = Imm;
1940
Craig Topper24e71012017-04-28 03:36:24 +00001941 OpUsefulBits <<= Width;
Tim Northover3b0846e2014-05-24 12:50:23 +00001942 --OpUsefulBits;
Silviu Barangaaab65b12016-11-30 17:04:22 +00001943
1944 if (Op.getOperand(1) == Orig) {
1945 // Copy the low bits from the result to bits starting from LSB.
1946 Mask = ResultUsefulBits & OpUsefulBits;
Craig Topper24e71012017-04-28 03:36:24 +00001947 Mask <<= LSB;
Silviu Barangaaab65b12016-11-30 17:04:22 +00001948 }
1949
1950 if (Op.getOperand(0) == Orig)
1951 // Bits starting from LSB in the input contribute to the result.
1952 Mask |= (ResultUsefulBits & ~OpUsefulBits);
Tim Northover3b0846e2014-05-24 12:50:23 +00001953 } else {
Silviu Barangaaab65b12016-11-30 17:04:22 +00001954 // The instruction is a BFI.
1955 uint64_t Width = MSB + 1;
1956 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
1957
Craig Topper24e71012017-04-28 03:36:24 +00001958 OpUsefulBits <<= Width;
Tim Northover3b0846e2014-05-24 12:50:23 +00001959 --OpUsefulBits;
Craig Topper24e71012017-04-28 03:36:24 +00001960 OpUsefulBits <<= LSB;
Silviu Barangaaab65b12016-11-30 17:04:22 +00001961
1962 if (Op.getOperand(1) == Orig) {
1963 // Copy the bits from the result to the zero bits.
1964 Mask = ResultUsefulBits & OpUsefulBits;
Craig Topperfc947bc2017-04-18 17:14:21 +00001965 Mask.lshrInPlace(LSB);
Silviu Barangaaab65b12016-11-30 17:04:22 +00001966 }
1967
1968 if (Op.getOperand(0) == Orig)
1969 Mask |= (ResultUsefulBits & ~OpUsefulBits);
Tim Northover3b0846e2014-05-24 12:50:23 +00001970 }
Silviu Barangaaab65b12016-11-30 17:04:22 +00001971
1972 UsefulBits &= Mask;
Tim Northover3b0846e2014-05-24 12:50:23 +00001973}
1974
1975static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1976 SDValue Orig, unsigned Depth) {
1977
1978 // Users of this node should have already been instruction selected
1979 // FIXME: Can we turn that into an assert?
1980 if (!UserNode->isMachineOpcode())
1981 return;
1982
1983 switch (UserNode->getMachineOpcode()) {
1984 default:
1985 return;
1986 case AArch64::ANDSWri:
1987 case AArch64::ANDSXri:
1988 case AArch64::ANDWri:
1989 case AArch64::ANDXri:
1990 // We increment Depth only when we call the getUsefulBits
1991 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1992 Depth);
1993 case AArch64::UBFMWri:
1994 case AArch64::UBFMXri:
1995 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1996
1997 case AArch64::ORRWrs:
1998 case AArch64::ORRXrs:
1999 if (UserNode->getOperand(1) != Orig)
2000 return;
2001 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
2002 Depth);
2003 case AArch64::BFMWri:
2004 case AArch64::BFMXri:
2005 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
Chad Rosier23a1a9a2016-05-11 20:19:54 +00002006
Chad Rosier23a1a9a2016-05-11 20:19:54 +00002007 case AArch64::STRBBui:
Chad Rosier9926a5e2016-05-12 01:42:01 +00002008 case AArch64::STURBBi:
Chad Rosier23a1a9a2016-05-11 20:19:54 +00002009 if (UserNode->getOperand(0) != Orig)
2010 return;
2011 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
2012 return;
2013
Chad Rosier23a1a9a2016-05-11 20:19:54 +00002014 case AArch64::STRHHui:
Chad Rosier9926a5e2016-05-12 01:42:01 +00002015 case AArch64::STURHHi:
Chad Rosier23a1a9a2016-05-11 20:19:54 +00002016 if (UserNode->getOperand(0) != Orig)
2017 return;
2018 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
2019 return;
Tim Northover3b0846e2014-05-24 12:50:23 +00002020 }
2021}
2022
2023static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
2024 if (Depth >= 6)
2025 return;
2026 // Initialize UsefulBits
2027 if (!Depth) {
Sanjay Patel5f6bb6c2016-09-14 15:43:44 +00002028 unsigned Bitwidth = Op.getScalarValueSizeInBits();
Tim Northover3b0846e2014-05-24 12:50:23 +00002029 // At the beginning, assume every produced bit is useful
2030 UsefulBits = APInt(Bitwidth, 0);
2031 UsefulBits.flipAllBits();
2032 }
2033 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
2034
2035 for (SDNode *Node : Op.getNode()->uses()) {
2036 // A use cannot produce useful bits
2037 APInt UsefulBitsForUse = APInt(UsefulBits);
2038 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
2039 UsersUsefulBits |= UsefulBitsForUse;
2040 }
2041 // UsefulBits contains the produced bits that are meaningful for the
2042 // current definition, thus a user cannot make a bit meaningful at
2043 // this point
2044 UsefulBits &= UsersUsefulBits;
2045}
2046
2047/// Create a machine node performing a notional SHL of Op by ShlAmount. If
2048/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
2049/// 0, return Op unchanged.
2050static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
2051 if (ShlAmount == 0)
2052 return Op;
2053
2054 EVT VT = Op.getValueType();
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002055 SDLoc dl(Op);
Tim Northover3b0846e2014-05-24 12:50:23 +00002056 unsigned BitWidth = VT.getSizeInBits();
2057 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2058
2059 SDNode *ShiftNode;
2060 if (ShlAmount > 0) {
2061 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
2062 ShiftNode = CurDAG->getMachineNode(
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002063 UBFMOpc, dl, VT, Op,
2064 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
2065 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
Tim Northover3b0846e2014-05-24 12:50:23 +00002066 } else {
2067 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
2068 assert(ShlAmount < 0 && "expected right shift");
2069 int ShrAmount = -ShlAmount;
2070 ShiftNode = CurDAG->getMachineNode(
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002071 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
2072 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
Tim Northover3b0846e2014-05-24 12:50:23 +00002073 }
2074
2075 return SDValue(ShiftNode, 0);
2076}
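// For example (hypothetical values): on an i32 value, ShlAmount = 3 emits
// UBFMWri Op, 29, 28 (an LSL #3), while ShlAmount = -3 emits
// UBFMWri Op, 3, 31 (an LSR #3).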
2077
2078/// Does this tree qualify as an attempt to move a bitfield into position,
2079/// essentially "(and (shl VAL, N), Mask)".
2080static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
Geoff Berry43ec15e2015-09-18 17:11:53 +00002081 bool BiggerPattern,
Tim Northover3b0846e2014-05-24 12:50:23 +00002082 SDValue &Src, int &ShiftAmount,
2083 int &MaskWidth) {
2084 EVT VT = Op.getValueType();
2085 unsigned BitWidth = VT.getSizeInBits();
2086 (void)BitWidth;
2087 assert(BitWidth == 32 || BitWidth == 64);
2088
Craig Topperd0af7e82017-04-28 05:31:46 +00002089 KnownBits Known;
2090 CurDAG->computeKnownBits(Op, Known);
Tim Northover3b0846e2014-05-24 12:50:23 +00002091
2092 // Non-zero in the sense that they're not provably zero, which is the key
2093 // point if we want to use this value
Craig Topperd0af7e82017-04-28 05:31:46 +00002094 uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
Tim Northover3b0846e2014-05-24 12:50:23 +00002095
2096 // Discard a constant AND mask if present. It's safe because the node will
2097 // already have been factored into the computeKnownBits calculation above.
2098 uint64_t AndImm;
2099 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
Craig Topperd0af7e82017-04-28 05:31:46 +00002100 assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
Tim Northover3b0846e2014-05-24 12:50:23 +00002101 Op = Op.getOperand(0);
2102 }
2103
Geoff Berry43ec15e2015-09-18 17:11:53 +00002104 // Don't match if the SHL has more than one use, since then we'll end up
2105 // generating SHL+UBFIZ instead of just keeping SHL+AND.
2106 if (!BiggerPattern && !Op.hasOneUse())
2107 return false;
2108
Tim Northover3b0846e2014-05-24 12:50:23 +00002109 uint64_t ShlImm;
2110 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
2111 return false;
2112 Op = Op.getOperand(0);
2113
2114 if (!isShiftedMask_64(NonZeroBits))
2115 return false;
2116
2117 ShiftAmount = countTrailingZeros(NonZeroBits);
Benjamin Kramer5f6a9072015-02-12 15:35:40 +00002118 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
Tim Northover3b0846e2014-05-24 12:50:23 +00002119
2120 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
2121 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
Geoff Berry43ec15e2015-09-18 17:11:53 +00002122 // amount. BiggerPattern is true when this pattern is being matched for BFI,
2123 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
2124 // which case it is not profitable to insert an extra shift.
2125 if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
2126 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002127 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
2128
2129 return true;
2130}
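// Worked example (illustrative, not from the original source): for
//   (i32 (and (shl %x, 3), 0x1f8))
// the provably non-zero bits form the shifted mask 0x1f8, so ShiftAmount = 3
// and MaskWidth = 6, and Src = %x directly since ShlImm equals ShiftAmount.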
2131
Chad Rosier02f25a92016-05-19 14:19:47 +00002132static bool isShiftedMask(uint64_t Mask, EVT VT) {
2133 assert(VT == MVT::i32 || VT == MVT::i64);
2134 if (VT == MVT::i32)
2135 return isShiftedMask_32(Mask);
2136 return isShiftedMask_64(Mask);
2137}
2138
Chad Rosier816a67d2016-05-26 13:27:56 +00002139// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
2140// inserted only sets known zero bits.
2141static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
2142 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
2143
2144 EVT VT = N->getValueType(0);
2145 if (VT != MVT::i32 && VT != MVT::i64)
2146 return false;
2147
2148 unsigned BitWidth = VT.getSizeInBits();
2149
2150 uint64_t OrImm;
2151 if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
2152 return false;
2153
2154 // Skip this transformation if the ORR immediate can be encoded in the ORR.
2155 // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
2156 // performance neutral.
2157 if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
2158 return false;
2159
2160 uint64_t MaskImm;
2161 SDValue And = N->getOperand(0);
2162 // Must be a single use AND with an immediate operand.
2163 if (!And.hasOneUse() ||
2164 !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
2165 return false;
2166
2167 // Compute the Known Zero for the AND as this allows us to catch more general
2168 // cases than just looking for AND with imm.
Craig Topperd0af7e82017-04-28 05:31:46 +00002169 KnownBits Known;
2170 CurDAG->computeKnownBits(And, Known);
Chad Rosier816a67d2016-05-26 13:27:56 +00002171
2172 // Non-zero in the sense that they're not provably zero, which is the key
2173 // point if we want to use this value.
Craig Topperd0af7e82017-04-28 05:31:46 +00002174 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
Chad Rosier816a67d2016-05-26 13:27:56 +00002175
2176 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
Craig Topperd0af7e82017-04-28 05:31:46 +00002177 if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
Chad Rosier816a67d2016-05-26 13:27:56 +00002178 return false;
2179
2180 // The bits being inserted must only set those bits that are known to be zero.
2181 if ((OrImm & NotKnownZero) != 0) {
2182 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
2183 // currently handle this case.
2184 return false;
2185 }
2186
2187 // BFI/BFXIL dst, src, #lsb, #width.
2188 int LSB = countTrailingOnes(NotKnownZero);
2189 int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();
2190
2191 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
2192 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2193 unsigned ImmS = Width - 1;
2194
2195 // If we're creating a BFI instruction avoid cases where we need more
2196 // instructions to materialize the BFI constant as compared to the original
2197 // ORR. A BFXIL will use the same constant as the original ORR, so the code
2198 // should be no worse in this case.
2199 bool IsBFI = LSB != 0;
2200 uint64_t BFIImm = OrImm >> LSB;
2201 if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
2202 // We have a BFI instruction and we know the constant can't be materialized
2203 // with a ORR-immediate with the zero register.
2204 unsigned OrChunks = 0, BFIChunks = 0;
2205 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
2206 if (((OrImm >> Shift) & 0xFFFF) != 0)
2207 ++OrChunks;
2208 if (((BFIImm >> Shift) & 0xFFFF) != 0)
2209 ++BFIChunks;
2210 }
2211 if (BFIChunks > OrChunks)
2212 return false;
2213 }
2214
2215 // Materialize the constant to be inserted.
2216 SDLoc DL(N);
2217 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
2218 SDNode *MOVI = CurDAG->getMachineNode(
2219 MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
2220
2221 // Create the BFI/BFXIL instruction.
2222 SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
2223 CurDAG->getTargetConstant(ImmR, DL, VT),
2224 CurDAG->getTargetConstant(ImmS, DL, VT)};
2225 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2226 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2227 return true;
2228}
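// Worked example (illustrative, not from the original source): for
//   (i32 (or (and %x, 0xffffff00), 0xab))
// 0xab is not a valid logical immediate, but the AND proves the low 8 bits
// are zero, so LSB = 0, Width = 8, ImmR = 0 and ImmS = 7, selecting
//   MOVi32imm #0xab ; BFMWri %x, %movi, 0, 7   // a BFXIL of width 8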
2229
Justin Bogner283e3bd2016-05-12 23:10:30 +00002230static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
2231 SelectionDAG *CurDAG) {
Tim Northover3b0846e2014-05-24 12:50:23 +00002232 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
2233
Tim Northover3b0846e2014-05-24 12:50:23 +00002234 EVT VT = N->getValueType(0);
Chad Rosier042ac2c2016-05-12 19:38:18 +00002235 if (VT != MVT::i32 && VT != MVT::i64)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002236 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002237
Chad Rosier91294c52016-05-18 17:43:11 +00002238 unsigned BitWidth = VT.getSizeInBits();
2239
Tim Northover3b0846e2014-05-24 12:50:23 +00002240 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
2241 // have the expected shape. Try to undo that.
Tim Northover3b0846e2014-05-24 12:50:23 +00002242
2243 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
2244 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
2245
Chad Rosiere0062022016-05-18 23:51:17 +00002246 // Given an OR operation, check if we have the following pattern
2247 // ubfm c, b, imm, imm2 (or something that does the same jobs, see
2248 // isBitfieldExtractOp)
2249 // d = e & mask2 ; where mask is a binary sequence of 1..10..0 and
2250 // countTrailingZeros(mask2) == imm2 - imm + 1
2251 // f = d | c
2252 // if yes, replace the OR instruction with:
2253 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
2254
Geoff Berry43ec15e2015-09-18 17:11:53 +00002255 // OR is commutative, check all combinations of operand order and values of
2256 // BiggerPattern, i.e.
2257 // Opd0, Opd1, BiggerPattern=false
2258 // Opd1, Opd0, BiggerPattern=false
2259 // Opd0, Opd1, BiggerPattern=true
2260 // Opd1, Opd0, BiggerPattern=true
2261 // Several of these combinations may match, so check with BiggerPattern=false
2262 // first since that will produce better results by matching more instructions
2263 // and/or inserting fewer extra instructions.
2264 for (int I = 0; I < 4; ++I) {
2265
Chad Rosier91294c52016-05-18 17:43:11 +00002266 SDValue Dst, Src;
2267 unsigned ImmR, ImmS;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002268 bool BiggerPattern = I / 2;
Tim Northover01dff9d2016-07-05 18:02:57 +00002269 SDValue OrOpd0Val = N->getOperand(I % 2);
2270 SDNode *OrOpd0 = OrOpd0Val.getNode();
Geoff Berry43ec15e2015-09-18 17:11:53 +00002271 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2272 SDNode *OrOpd1 = OrOpd1Val.getNode();
2273
Tim Northover3b0846e2014-05-24 12:50:23 +00002274 unsigned BFXOpc;
2275 int DstLSB, Width;
2276 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
Geoff Berry43ec15e2015-09-18 17:11:53 +00002277 NumberOfIgnoredLowBits, BiggerPattern)) {
Tim Northover3b0846e2014-05-24 12:50:23 +00002278 // Check that the returned opcode is compatible with the pattern,
2279 // i.e., same type and zero extended (U and not S)
2280 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2281 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2282 continue;
2283
2284 // Compute the width of the bitfield insertion
2285 DstLSB = 0;
2286 Width = ImmS - ImmR + 1;
2287 // FIXME: This constraint is to catch bitfield insertion; we may
2288 // want to widen the pattern if we want to grab the general bitfield
2289 // move case.
2290 if (Width <= 0)
2291 continue;
2292
2293 // If the mask on the insertee is correct, we have a BFXIL operation. We
2294 // can share the ImmR and ImmS values from the already-computed UBFM.
Tim Northover01dff9d2016-07-05 18:02:57 +00002295 } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
Geoff Berry43ec15e2015-09-18 17:11:53 +00002296 BiggerPattern,
2297 Src, DstLSB, Width)) {
Chad Rosier91294c52016-05-18 17:43:11 +00002298 ImmR = (BitWidth - DstLSB) % BitWidth;
Tim Northover3b0846e2014-05-24 12:50:23 +00002299 ImmS = Width - 1;
2300 } else
2301 continue;
2302
2303 // Check the second part of the pattern
2304 EVT VT = OrOpd1->getValueType(0);
2305 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2306
2307 // Compute the Known Zero for the candidate of the first operand.
2308 // This allows us to catch more general cases than just looking for
2309 // AND with imm. Indeed, simplify-demanded-bits may have removed
2310 // the AND instruction because it proves it was useless.
Craig Topperd0af7e82017-04-28 05:31:46 +00002311 KnownBits Known;
2312 CurDAG->computeKnownBits(OrOpd1Val, Known);
Tim Northover3b0846e2014-05-24 12:50:23 +00002313
2314 // Check if there is enough room for the second operand to appear
2315 // in the first one
2316 APInt BitsToBeInserted =
Craig Topperd0af7e82017-04-28 05:31:46 +00002317 APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);
Tim Northover3b0846e2014-05-24 12:50:23 +00002318
Craig Topperd0af7e82017-04-28 05:31:46 +00002319 if ((BitsToBeInserted & ~Known.Zero) != 0)
Tim Northover3b0846e2014-05-24 12:50:23 +00002320 continue;
2321
2322 // Set the first operand
2323 uint64_t Imm;
2324 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2325 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2326 // In that case, we can eliminate the AND
2327 Dst = OrOpd1->getOperand(0);
2328 else
2329 // Maybe the AND has been removed by simplify-demanded-bits
2330 // or is useful because it discards more bits
2331 Dst = OrOpd1Val;
2332
2333 // both parts match
Chad Rosier042ac2c2016-05-12 19:38:18 +00002334 SDLoc DL(N);
2335 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
2336 CurDAG->getTargetConstant(ImmS, DL, VT)};
2337 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
Justin Bogner283e3bd2016-05-12 23:10:30 +00002338 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2339 return true;
Tim Northover3b0846e2014-05-24 12:50:23 +00002340 }
Chad Rosier02f25a92016-05-19 14:19:47 +00002341
2342 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
2343 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
2344 // mask (e.g., 0x000ffff0).
2345 uint64_t Mask0Imm, Mask1Imm;
2346 SDValue And0 = N->getOperand(0);
2347 SDValue And1 = N->getOperand(1);
2348 if (And0.hasOneUse() && And1.hasOneUse() &&
2349 isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
2350 isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
2351 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
2352 (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
2353
Chad Rosier02f25a92016-05-19 14:19:47 +00002354 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
2355 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
2356 // bits to be inserted.
2357 if (isShiftedMask(Mask0Imm, VT)) {
2358 std::swap(And0, And1);
2359 std::swap(Mask0Imm, Mask1Imm);
2360 }
2361
2362 SDValue Src = And1->getOperand(0);
2363 SDValue Dst = And0->getOperand(0);
2364 unsigned LSB = countTrailingZeros(Mask1Imm);
2365 int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
2366
2367 // The BFXIL inserts the low-order bits from a source register, so right
2368 // shift the needed bits into place.
2369 SDLoc DL(N);
2370 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2371 SDNode *LSR = CurDAG->getMachineNode(
2372 ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
2373 CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
2374
2375 // BFXIL is an alias of BFM, so translate to BFM operands.
2376 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2377 unsigned ImmS = Width - 1;
2378
2379 // Create the BFXIL instruction.
2380 SDValue Ops[] = {Dst, SDValue(LSR, 0),
2381 CurDAG->getTargetConstant(ImmR, DL, VT),
2382 CurDAG->getTargetConstant(ImmS, DL, VT)};
2383 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2384 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2385 return true;
2386 }
2387
Justin Bogner283e3bd2016-05-12 23:10:30 +00002388 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002389}
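// Worked example for the two-AND case above (illustrative, hypothetical
// values): for
//   (i32 (or (and %x, 0xffff000f), (and %y, 0x0000fff0)))
// Mask1Imm = 0x0000fff0 is the shifted mask, so LSB = 4 and Width = 12; %y is
// shifted right by 4 (UBFMWri %y, 4, 31) and BFMWri %x, %lsr, 28, 11 then
// inserts those 12 bits at bit 4 of %x.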
2390
Justin Bogner283e3bd2016-05-12 23:10:30 +00002391bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
Tim Northover3b0846e2014-05-24 12:50:23 +00002392 if (N->getOpcode() != ISD::OR)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002393 return false;
Tim Northover3b0846e2014-05-24 12:50:23 +00002394
Weiming Zhao56ab5182015-12-01 19:17:49 +00002395 APInt NUsefulBits;
2396 getUsefulBits(SDValue(N, 0), NUsefulBits);
Tim Northover3b0846e2014-05-24 12:50:23 +00002397
Weiming Zhao56ab5182015-12-01 19:17:49 +00002398 // If none of the bits are useful, just return UNDEF.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002399 if (!NUsefulBits) {
2400 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2401 return true;
2402 }
Weiming Zhao56ab5182015-12-01 19:17:49 +00002403
Chad Rosier816a67d2016-05-26 13:27:56 +00002404 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
2405 return true;
2406
2407 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
Tim Northover3b0846e2014-05-24 12:50:23 +00002408}
2409
Geoff Berry43ec15e2015-09-18 17:11:53 +00002410/// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2411/// equivalent of a left shift by a constant amount followed by an and masking
2412/// out a contiguous set of bits.
Justin Bogner283e3bd2016-05-12 23:10:30 +00002413bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
Geoff Berry43ec15e2015-09-18 17:11:53 +00002414 if (N->getOpcode() != ISD::AND)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002415 return false;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002416
2417 EVT VT = N->getValueType(0);
Chad Rosier08d99082016-05-13 22:53:13 +00002418 if (VT != MVT::i32 && VT != MVT::i64)
Justin Bogner283e3bd2016-05-12 23:10:30 +00002419 return false;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002420
2421 SDValue Op0;
2422 int DstLSB, Width;
2423 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2424 Op0, DstLSB, Width))
Justin Bogner283e3bd2016-05-12 23:10:30 +00002425 return false;
Geoff Berry43ec15e2015-09-18 17:11:53 +00002426
2427 // ImmR is the rotate right amount.
2428 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2429 // ImmS is the most significant bit of the source to be moved.
2430 unsigned ImmS = Width - 1;
2431
  SDLoc DL(N);
  SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}

bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits), where
  // fbits is between 1 and 32 for a destination w-register, or 1 and 64 for
  // an x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)), so
  // we want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
  return true;
}
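
// Worked example for SelectCVTFixedPosOperand above (illustrative only):
// for (fp_to_sint (fmul x, 16.0)) with a w-register destination, FVal = 16.0
// converts exactly to IntVal = 16, so FBits = 4, and the caller can emit a
// fixed-point convert such as FCVTZS Wd, Sn, #4, i.e. truncation of x * 2^4.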

// Inspects a register string of the form o0:op1:CRn:CRm:op2, extracts the
// integer value of each field, and combines the fields into the single
// immediate operand used by the MRS/MSR instructions.
static int getIntOperandFromRegisterString(StringRef RegString) {
  SmallVector<StringRef, 5> Fields;
  RegString.split(Fields, ':');

  if (Fields.size() == 1)
    return -1;

  assert(Fields.size() == 5 &&
         "Invalid number of fields in read register string");

  SmallVector<int, 5> Ops;
  bool AllIntFields = true;

  for (StringRef Field : Fields) {
    unsigned IntField;
    AllIntFields &= !Field.getAsInteger(10, IntField);
    Ops.push_back(IntField);
  }

  assert(AllIntFields &&
         "Unexpected non-integer value in special register string.");

  // Need to combine the integer fields of the string into a single value
  // based on the bit encoding of the MRS/MSR instructions.
  return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
         (Ops[3] << 3) | (Ops[4]);
}
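
// The shifts above pack the fields as o0 in bits [15:14], op1 in [13:11],
// CRn in [10:7], CRm in [6:3] and op2 in [2:0]. For example (a hypothetical
// string, shown only to illustrate the packing), "3:3:4:2:1" yields
// (3 << 14) | (3 << 11) | (4 << 7) | (2 << 3) | 1 = 0xda11.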

// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  // Use the sysreg mapper to map the remaining possible strings to the
  // value for the register to be used for the instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Readable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
  else
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());

  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  return false;
}
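
// For example (illustrative only): a call such as
//   %v = call i64 @llvm.read_register.i64(metadata !{!"mpidr_el1"})
// does not parse as five integer fields, so it falls through to the sysreg
// mapper, which resolves the name to its encoding and selects an MRS.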

// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(
        N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(2), N->getOperand(0)));
    return true;
  }

  // Check if the register was one of those allowed as the pstatefield value
  // in the MSR (immediate) instruction. To accept the values allowed in the
  // pstatefield for the MSR (immediate) instruction, we also require that an
  // immediate value has been provided as an argument; we know this is the
  // case because it has been ensured by semantic checking.
  auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
  if (PMapper) {
    assert(isa<ConstantSDNode>(N->getOperand(2)) &&
           "Expected a constant integer expression.");
    unsigned Reg = PMapper->Encoding;
    uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned State;
    if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
      assert(Immed < 2 && "Bad imm");
      State = AArch64::MSRpstateImm1;
    } else {
      assert(Immed < 16 && "Bad imm");
      State = AArch64::MSRpstateImm4;
    }
    ReplaceNode(N, CurDAG->getMachineNode(
                       State, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       CurDAG->getTargetConstant(Immed, DL, MVT::i16),
                       N->getOperand(0)));
    return true;
  }

  // Use the sysreg mapper to attempt to map the remaining possible strings
  // to the value for the register to be used for the MSR (register)
  // instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Writeable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
  else
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MSR, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(2), N->getOperand(0)));
    return true;
  }

  return false;
}
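
// For example (illustrative only): writing the "pan" pstate field with an
// immediate of 1 takes the MSRpstateImm1 path above, since PAN accepts only
// a 1-bit immediate, while most other pstate fields use the 4-bit form.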

/// We have special pseudo-instructions for these compare-and-swap operations;
/// select one of them here.
bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
  unsigned Opcode;
  EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();

  // Leave the node to the default patterns if the subtarget supports LSE;
  // it is selected to a compare-and-swap instruction instead of a pseudo.
  if (Subtarget->hasLSE()) return false;

  if (MemTy == MVT::i8)
    Opcode = AArch64::CMP_SWAP_8;
  else if (MemTy == MVT::i16)
    Opcode = AArch64::CMP_SWAP_16;
  else if (MemTy == MVT::i32)
    Opcode = AArch64::CMP_SWAP_32;
  else if (MemTy == MVT::i64)
    Opcode = AArch64::CMP_SWAP_64;
  else
    llvm_unreachable("Unknown AtomicCmpSwap type");

  MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
                   N->getOperand(0)};
  SDNode *CmpSwap = CurDAG->getMachineNode(
      Opcode, SDLoc(N),
      CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);

  ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
  CurDAG->RemoveDeadNode(N);

  return true;
}
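
// The CMP_SWAP_* pseudos created above are expanded late (see
// AArch64ExpandPseudoInsts.cpp) into the usual exclusive-load/store retry
// loop, e.g. LDAXR / CMP / STLXR / CBNZ.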

void AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, it has already been selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // A few cases need custom selection.
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ATOMIC_CMP_SWAP:
    if (SelectCMP_SWAP(Node))
      return;
    break;

  case ISD::READ_REGISTER:
    if (tryReadRegister(Node))
      return;
    break;

  case ISD::WRITE_REGISTER:
    if (tryWriteRegister(Node))
      return;
    break;

  case ISD::ADD:
    if (tryMLAV64LaneV128(Node))
      return;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    if (tryIndexedLoad(Node))
      return;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (tryBitfieldExtractOp(Node))
      return;
    if (tryBitfieldInsertInZeroOp(Node))
      return;
    break;

  case ISD::SIGN_EXTEND:
    if (tryBitfieldExtractOpFromSExt(Node))
      return;
    break;

  case ISD::OR:
    if (tryBitfieldInsertOp(Node))
      return;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      llvm_unreachable("Unexpected vector element type!");
    case 64:
      SubReg = AArch64::dsub;
      break;
    case 32:
      SubReg = AArch64::ssub;
      break;
    case 16:
      SubReg = AArch64::hsub;
      break;
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    ReplaceNode(Node, Extract.getNode());
    return;
  }
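  // For example (illustrative only): extracting lane 0 of a v2f64 with an
  // f64 result becomes an EXTRACT_SUBREG of the dsub subregister, which later
  // folds to an FMOV or is coalesced away entirely.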
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
        ReplaceNode(Node, New.getNode());
        return;
      } else if (VT == MVT::i64) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
        ReplaceNode(Node, New.getNode());
        return;
      }
    }
    break;
  }

  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    SDLoc DL(Node);
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
    CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_ldaxp:
    case Intrinsic::aarch64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
      ReplaceNode(Node, Ld);
      return;
    }
    case Intrinsic::aarch64_stlxp:
    case Intrinsic::aarch64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      ReplaceNode(Node, St);
      return;
    }
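    // For example (illustrative only): a call of
    //   { i64, i64 } @llvm.aarch64.ldaxp(i8* %p)
    // selects to LDAXPX above, whose two i64 results are the two 64-bit
    // halves of the 128-bit exclusive load.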
    case Intrinsic::aarch64_neon_ld1x2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld1x3:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld1x4:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld3:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld4:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
        return;
      }
      break;
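    // In the SelectLoad calls above, the final argument (dsub0 or qsub0) is
    // the sub-register index of the first D or Q register of the consecutive
    // register tuple the LDx instruction writes; SelectLoad (defined earlier
    // in this file) uses it to peel the individual result vectors out of the
    // tuple. This is a summary of behavior defined elsewhere, for reference.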
    case Intrinsic::aarch64_neon_ld2r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld3r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld4r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 2, AArch64::LD2i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectLoadLane(Node, 2, AArch64::LD2i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectLoadLane(Node, 2, AArch64::LD2i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectLoadLane(Node, 2, AArch64::LD2i64);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 3, AArch64::LD3i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectLoadLane(Node, 3, AArch64::LD3i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectLoadLane(Node, 3, AArch64::LD3i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectLoadLane(Node, 3, AArch64::LD3i64);
        return;
      }
      break;
    case Intrinsic::aarch64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 4, AArch64::LD4i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectLoadLane(Node, 4, AArch64::LD4i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectLoadLane(Node, 4, AArch64::LD4i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectLoadLane(Node, 4, AArch64::LD4i64);
        return;
      }
      break;
    }
  } break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_tbl2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                           : AArch64::TBLv16i8Three,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                           : AArch64::TBLv16i8Four,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbx2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                           : AArch64::TBXv16i8Three,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                           : AArch64::TBXv16i8Four,
                  true);
      return;
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (tryMULLV64LaneV128(IntNo, Node))
        return;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST1Twov8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST1Twov16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST1Twov4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST1Twov8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST1Twov2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST1Twov4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST1Twov2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST1Threev8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST1Threev16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST1Threev4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST1Threev8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST1Threev2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST1Threev4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST1Threev2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST2Twov8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST2Twov16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST2Twov4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST2Twov8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST2Twov2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST2Twov4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST2Twov2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST3Threev8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST3Threev16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST3Threev4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST3Threev8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST3Threev2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST3Threev4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST3Threev2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST4Fourv2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 2, AArch64::ST2i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 2, AArch64::ST2i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 2, AArch64::ST2i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 2, AArch64::ST2i64);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 3, AArch64::ST3i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 3, AArch64::ST3i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 3, AArch64::ST3i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 3, AArch64::ST3i64);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 4, AArch64::ST4i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 4, AArch64::ST4i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 4, AArch64::ST4i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 4, AArch64::ST4i64);
        return;
      }
      break;
    }
    }
    break;
  }
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
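  // Post-increment LD1x{2,3,4}: consecutive, non-interleaving loads into two,
  // three, or four registers, selected as the LD1 multi-register forms.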
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
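  // Post-increment load-and-replicate (LDnR): every lane of each result
  // vector is filled with the same loaded element.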
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
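  // Post-increment lane loads (LDn into a single lane): only the element
  // width matters here, so the vector types are grouped by element size.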
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
      return;
    }
    break;
  }
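  // Post-increment structured stores. Unlike the loads above, the value type
  // is taken from operand 1, the first vector being stored.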
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    }
    break;
  }
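  // Post-increment ST1x{2,3,4}: consecutive stores from two, three, or four
  // registers, mirroring the LD1x cases above.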
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
      return;
    }
    break;
  }
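  // Post-increment lane stores: as with the lane loads, only the element
  // width selects the instruction.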
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
      return;
    }
    break;
  }
  }

  // Select the default instruction: anything not handled above falls through
  // to the TableGen-generated matcher.
  SelectCode(Node);
}

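// Typical use (a sketch, not part of this file): a target's pass
// configuration normally adds this pass from its addInstSelector() hook,
// e.g.
//   addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));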
/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}